Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6: (577 commits)
  Staging: ramzswap: Handler for swap slot free callback
  swap: Add swap slot free callback to block_device_operations
  swap: Add flag to identify block swap devices
  Staging: vt6655: use ETH_FRAME_LEN macro instead of custom one
  Staging: vt6655: use ETH_DATA_LEN macro instead of custom one
  Staging: vt6655: use ETH_FCS_LEN macro instead of custom one
  Staging: vt6656: use ETH_HLEN macro instead of custom one
  Staging: comedi: quatech_daqp_cs.c Replace eos semaphore with a completion.
  Staging: dt3155v4l: remove private memory allocator
  Staging: crystalhd: Remove typedefs from driver
  Staging: winbond: Fix for pointer name format issue in mds.c
  Staging: vt6656: removed custom UCHAR/USHORT/UINT/ULONG/ULONGLONG typedefs
  Staging: vt6656: removed custom CHAR/SHORT/INT/LONG typedefs
  Staging: comedi: Altered the way printk is used in 8255.c
  staging: iio: adis16350 and similar IMU driver
  Staging: iio: max1363 Fix two bugs in single_channel_from_ring
  Staging: iio: adis16220 extract bin_attribute structures from state
  Staging: iio: adis16220 vibration sensor driver
  Staging: comedi: Kconfig dependancy fixes
  Staging: comedi: fix up build error from last Kconfig changes
  ...
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-picolcd b/Documentation/ABI/testing/sysfs-driver-hid-picolcd
new file mode 100644
index 0000000..08579e7
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-picolcd
@@ -0,0 +1,43 @@
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/operation_mode
+Date:		March 2010
+Contact:	Bruno Prémont <bonbons@linux-vserver.org>
+Description:	Make it possible to switch the PicoLCD device between LCD
+		(firmware) and bootloader (flasher) operation modes.
+
+		Reading: returns list of available modes, the active mode being
+		enclosed in brackets ('[' and ']')
+
+		Writing: causes operation mode switch. Permitted values are
+		the non-active mode names listed when read.
+
+		Note: when switching modes, the current PicoLCD HID device gets
+		disconnected and reconnects after the delay given by the
+		operation_mode_delay attribute.
+
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/operation_mode_delay
+Date:		April 2010
+Contact:	Bruno Prémont <bonbons@linux-vserver.org>
+Description:	Delay the PicoLCD waits before restarting in the new mode
+		after operation_mode has been changed.
+
+		Reading/Writing: The value is expressed in ms; the permitted
+		range is 0..30000 ms.
+
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/fb_update_rate
+Date:		March 2010
+Contact:	Bruno Prémont <bonbons@linux-vserver.org>
+Description:	Make it possible to adjust the defio refresh rate.
+
+		Reading: returns list of available refresh rates (expressed in Hz),
+		the active refresh rate being enclosed in brackets ('[' and ']')
+
+		Writing: accepts new refresh rate expressed in integer Hz
+		within permitted rates.
+
+		Note: As the device can barely do 2 complete refreshes a second,
+		it only makes sense to adjust this value if only one or two
+		tiles get changed, and it is not appropriate to expect the
+		application to flush its tiny changes explicitly at a higher
+		than default rate.
+
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-prodikeys b/Documentation/ABI/testing/sysfs-driver-hid-prodikeys
new file mode 100644
index 0000000..05d988c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-prodikeys
@@ -0,0 +1,29 @@
+What:		/sys/bus/hid/drivers/prodikeys/.../channel
+Date:		April 2010
+KernelVersion:	2.6.34
+Contact:	Don Prince <dhprince.devel@yahoo.co.uk>
+Description:
+		Allows control (via software) of the MIDI channel to which
+		the pc-midi keyboard will output MIDI data.
+		Range: 0..15
+		Type:  Read/write
+What:		/sys/bus/hid/drivers/prodikeys/.../sustain
+Date:		April 2010
+KernelVersion:	2.6.34
+Contact:	Don Prince <dhprince.devel@yahoo.co.uk>
+Description:
+		Allows control (via software) of the sustain duration of a
+		note held by the pc-midi driver.
+		0 means sustain mode is disabled.
+		Range: 0..5000 (milliseconds)
+		Type:  Read/write
+What:		/sys/bus/hid/drivers/prodikeys/.../octave
+Date:		April 2010
+KernelVersion:	2.6.34
+Contact:	Don Prince <dhprince.devel@yahoo.co.uk>
+Description:
+		Controls the octave shift modifier in the pc-midi driver.
+		The octave can be shifted via software up/down 2 octaves.
+		0 means no octave shift.
+		Range: -2..2 (minus 2 to plus 2)
+		Type: Read/Write
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone b/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone
new file mode 100644
index 0000000..88340a2
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone
@@ -0,0 +1,111 @@
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/actual_dpi
+Date:		March 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	It is possible to switch the dpi setting of the mouse with the
+		press of a button.
+		When read, this file returns the raw number of the actual dpi
+		setting reported by the mouse. This number has to be further
+		processed to receive the real dpi value.
+
+		VALUE DPI
+		1     800
+		2     1200
+		3     1600
+		4     2000
+		5     2400
+		6     3200
+
+		This file is readonly.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/actual_profile
+Date:		March 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When read, this file returns the number of the actual profile.
+		This file is readonly.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/firmware_version
+Date:		March 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When read, this file returns the raw integer version number of the
+		firmware reported by the mouse. Using the integer value eases
+		further usage in other programs. To receive the real version
+		number the decimal point has to be shifted 2 positions to the
+		left. E.g. a returned value of 138 means 1.38
+		This file is readonly.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/kone_driver_version
+Date:		March 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When read, this file returns the driver version.
+		The format of the string is "v<major>.<minor>.<patchlevel>".
+		This attribute is used by the userland tools to find the sysfs-
+		paths of installed kone-mice and determine the capabilities of
+		the driver. Versions of this driver for old kernels replace
+		usbhid instead of generic-usb. The way to scan for this file
+		has been chosen to provide a consistent way for all supported
+		kernel versions.
+		This file is readonly.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/profile[1-5]
+Date:		March 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The mouse can store 5 profiles which can be switched by the
+                press of a button. A profile holds information like button
+                mappings, sensitivity, the colors of the 5 leds and light
+                effects.
+                When read, these files return the respective profile. The
+                returned data is 975 bytes in size.
+		When written, this file lets one write the respective profile
+		data back to the mouse. The data has to be 975 bytes long.
+		The mouse will reject invalid data; however, the profile number
+		stored in the profile data does not need to match the number of
+		the profile slot it is written to.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/settings
+Date:		March 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When read, this file returns the settings stored in the mouse.
+		The size of the data is 36 bytes and holds information like the
+		startup_profile, tcu state and calibration_data.
+		When written, this file lets one write settings back to the mouse.
+		The data has to be 36 bytes long. The mouse will reject invalid
+		data.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/startup_profile
+Date:		March 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The integer value of this attribute ranges from 1 to 5.
+                When read, this attribute returns the number of the profile
+                that's active when the mouse is powered on.
+		When written, this file sets the number of the startup profile
+		and the mouse activates this profile immediately.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/tcu
+Date:		March 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The mouse has a "Tracking Control Unit" which lets the user
+		calibrate the laser power to fit the mousepad surface.
+		When read, this file returns the current state of the TCU,
+		where 0 means off and 1 means on.
+		Writing 0 in this file will switch the TCU off.
+		Writing 1 in this file will start the calibration which takes
+		around 6 seconds to complete and activates the TCU.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/weight
+Date:		March 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The mouse can be equipped with one of four supplied weights
+		ranging from 5 to 20 grams, which are recognized by the mouse
+		so that the fitted weight can be read out. When read, this file
+		returns the raw value reported by the mouse, which eases further
+		processing in other software.
+		The values map to the weights as follows:
+
+		VALUE WEIGHT
+		0     none
+		1     5g
+		2     10g
+		3     15g
+		4     20g
+
+		This file is readonly.
diff --git a/Documentation/ABI/testing/sysfs-wacom b/Documentation/ABI/testing/sysfs-wacom
new file mode 100644
index 0000000..1517976
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-wacom
@@ -0,0 +1,10 @@
+What:		/sys/class/hidraw/hidraw*/device/speed
+Date:		April 2010
+Kernel Version:	2.6.35
+Contact:	linux-bluetooth@vger.kernel.org
+Description:
+		The /sys/class/hidraw/hidraw*/device/speed file controls
+		the reporting speed of a Wacom Bluetooth tablet. Reading from
+		this file returns 1 if the tablet reports in high speed mode
+		or 0 otherwise. Writing one of these values to this file
+		switches the reporting speed.
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 325cfd1..c7e5dc7 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -14,7 +14,7 @@
 	    genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
 	    mac80211.xml debugobjects.xml sh.xml regulator.xml \
 	    alsa-driver-api.xml writing-an-alsa-driver.xml \
-	    tracepoint.xml media.xml
+	    tracepoint.xml media.xml drm.xml
 
 ###
 # The build process is as follows (targets):
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
new file mode 100644
index 0000000..7583dc7
--- /dev/null
+++ b/Documentation/DocBook/drm.tmpl
@@ -0,0 +1,839 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="drmDevelopersGuide">
+  <bookinfo>
+    <title>Linux DRM Developer's Guide</title>
+
+    <copyright>
+      <year>2008-2009</year>
+      <holder>
+	Intel Corporation (Jesse Barnes &lt;jesse.barnes@intel.com&gt;)
+      </holder>
+    </copyright>
+
+    <legalnotice>
+      <para>
+	The contents of this file may be used under the terms of the GNU
+	General Public License version 2 (the "GPL") as distributed in
+	the kernel source COPYING file.
+      </para>
+    </legalnotice>
+  </bookinfo>
+
+<toc></toc>
+
+  <!-- Introduction -->
+
+  <chapter id="drmIntroduction">
+    <title>Introduction</title>
+    <para>
+      The Linux DRM layer contains code intended to support the needs
+      of complex graphics devices, usually containing programmable
+      pipelines well suited to 3D graphics acceleration.  Graphics
+      drivers in the kernel can make use of DRM functions to make
+      tasks like memory management, interrupt handling and DMA easier,
+      and provide a uniform interface to applications.
+    </para>
+    <para>
+      A note on versions: this guide covers features found in the DRM
+      tree, including the TTM memory manager, output configuration and
+      mode setting, and the new vblank internals, in addition to all
+      the regular features found in current kernels.
+    </para>
+    <para>
+      [Insert diagram of typical DRM stack here]
+    </para>
+  </chapter>
+
+  <!-- Internals -->
+
+  <chapter id="drmInternals">
+    <title>DRM Internals</title>
+    <para>
+      This chapter documents DRM internals relevant to driver authors
+      and developers working to add support for the latest features to
+      existing drivers.
+    </para>
+    <para>
+      First, we'll go over some typical driver initialization
+      requirements, like setting up command buffers, creating an
+      initial output configuration, and initializing core services.
+      Subsequent sections will cover core internals in more detail,
+      providing implementation notes and examples.
+    </para>
+    <para>
+      The DRM layer provides several services to graphics drivers,
+      many of them driven by the application interfaces it provides
+      through libdrm, the library that wraps most of the DRM ioctls.
+      These include vblank event handling, memory
+      management, output management, framebuffer management, command
+      submission &amp; fencing, suspend/resume support, and DMA
+      services.
+    </para>
+    <para>
+      The core of every DRM driver is struct drm_device.  Drivers
+      will typically statically initialize a drm_driver structure
+      describing their capabilities and hooks, then pass it to
+      drm_init() at load time.
+
+  <!-- Internals: driver init -->
+
+  <sect1>
+    <title>Driver initialization</title>
+    <para>
+      Before calling the DRM initialization routines, the driver must
+      first create and fill out a struct drm_driver structure.
+    </para>
+    <programlisting>
+      static struct drm_driver driver = {
+	/* don't use mtrr's here, the Xserver or user space app should
+	 * deal with them for intel hardware.
+	 */
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET,
+	.load = i915_driver_load,
+	.unload = i915_driver_unload,
+	.firstopen = i915_driver_firstopen,
+	.lastclose = i915_driver_lastclose,
+	.preclose = i915_driver_preclose,
+	.save = i915_save,
+	.restore = i915_restore,
+	.device_is_agp = i915_driver_device_is_agp,
+	.get_vblank_counter = i915_get_vblank_counter,
+	.enable_vblank = i915_enable_vblank,
+	.disable_vblank = i915_disable_vblank,
+	.irq_preinstall = i915_driver_irq_preinstall,
+	.irq_postinstall = i915_driver_irq_postinstall,
+	.irq_uninstall = i915_driver_irq_uninstall,
+	.irq_handler = i915_driver_irq_handler,
+	.reclaim_buffers = drm_core_reclaim_buffers,
+	.get_map_ofs = drm_core_get_map_ofs,
+	.get_reg_ofs = drm_core_get_reg_ofs,
+	.fb_probe = intelfb_probe,
+	.fb_remove = intelfb_remove,
+	.fb_resize = intelfb_resize,
+	.master_create = i915_master_create,
+	.master_destroy = i915_master_destroy,
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = i915_debugfs_init,
+	.debugfs_cleanup = i915_debugfs_cleanup,
+#endif
+	.gem_init_object = i915_gem_init_object,
+	.gem_free_object = i915_gem_free_object,
+	.gem_vm_ops = &amp;i915_gem_vm_ops,
+	.ioctls = i915_ioctls,
+	.fops = {
+		.owner = THIS_MODULE,
+		.open = drm_open,
+		.release = drm_release,
+		.ioctl = drm_ioctl,
+		.mmap = drm_mmap,
+		.poll = drm_poll,
+		.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+		.compat_ioctl = i915_compat_ioctl,
+#endif
+		},
+	.pci_driver = {
+		.name = DRIVER_NAME,
+		.id_table = pciidlist,
+		.probe = probe,
+		.remove = __devexit_p(drm_cleanup_pci),
+		},
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+      };
+    </programlisting>
+    <para>
+      In the example above, taken from the i915 DRM driver, the driver
+      sets several flags indicating what core features it supports.
+      We'll go over the individual callbacks in later sections.  Since
+      flags indicate which features your driver supports to the DRM
+      core, you need to set most of them prior to calling drm_init().  Some,
+      like DRIVER_MODESET, can be set later based on user-supplied parameters,
+      but that's the exception rather than the rule.
+    </para>
+    <variablelist>
+      <title>Driver flags</title>
+      <varlistentry>
+	<term>DRIVER_USE_AGP</term>
+	<listitem><para>
+	    Driver uses AGP interface
+	</para></listitem>
+      </varlistentry>
+      <varlistentry>
+	<term>DRIVER_REQUIRE_AGP</term>
+	<listitem><para>
+	    Driver needs AGP interface to function.
+	</para></listitem>
+      </varlistentry>
+      <varlistentry>
+	<term>DRIVER_USE_MTRR</term>
+	<listitem>
+	  <para>
+	    Driver uses MTRR interface for mapping memory.  Deprecated.
+	  </para>
+	</listitem>
+      </varlistentry>
+      <varlistentry>
+	<term>DRIVER_PCI_DMA</term>
+	<listitem><para>
+	    Driver is capable of PCI DMA.  Deprecated.
+	</para></listitem>
+      </varlistentry>
+      <varlistentry>
+	<term>DRIVER_SG</term>
+	<listitem><para>
+	    Driver can perform scatter/gather DMA.  Deprecated.
+	</para></listitem>
+      </varlistentry>
+      <varlistentry>
+	<term>DRIVER_HAVE_DMA</term>
+	<listitem><para>Driver supports DMA.  Deprecated.</para></listitem>
+      </varlistentry>
+      <varlistentry>
+	<term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
+	<listitem>
+	  <para>
+	    DRIVER_HAVE_IRQ indicates whether the driver has an IRQ
+	    handler, and DRIVER_IRQ_SHARED indicates whether the device &amp;
+	    handler support shared IRQs (note that this is required of
+	    PCI drivers).
+	  </para>
+	</listitem>
+      </varlistentry>
+      <varlistentry>
+	<term>DRIVER_DMA_QUEUE</term>
+	<listitem>
+	  <para>
+	    If the driver queues DMA requests and completes them
+	    asynchronously, this flag should be set.  Deprecated.
+	  </para>
+	</listitem>
+      </varlistentry>
+      <varlistentry>
+	<term>DRIVER_FB_DMA</term>
+	<listitem>
+	  <para>
+	    Driver supports DMA to/from the framebuffer.  Deprecated.
+	  </para>
+	</listitem>
+      </varlistentry>
+      <varlistentry>
+	<term>DRIVER_MODESET</term>
+	<listitem>
+	  <para>
+	    Driver supports mode setting interfaces.
+	  </para>
+	</listitem>
+      </varlistentry>
+    </variablelist>
+    <para>
+      In this specific case, the driver requires AGP and supports
+      IRQs.  DMA, as we'll see, is handled by device specific ioctls.
+      It also supports the kernel mode setting APIs, though
+      unlike in the actual i915 driver source, this example unconditionally
+      exports KMS capability.
+    </para>
+  </sect1>
+
+  <!-- Internals: driver load -->
+
+  <sect1>
+    <title>Driver load</title>
+    <para>
+      In the previous section, we saw what a typical drm_driver
+      structure might look like.  One of the more important fields in
+      the structure is the hook for the load function.
+    </para>
+    <programlisting>
+      static struct drm_driver driver = {
+      	...
+      	.load = i915_driver_load,
+        ...
+      };
+    </programlisting>
+    <para>
+      The load function has many responsibilities: allocating a driver
+      private structure, specifying supported performance counters,
+      configuring the device (e.g. mapping registers &amp; command
+      buffers), initializing the memory manager, and setting up the
+      initial output configuration.
+    </para>
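+    <para>
+      As an illustration, a minimal sketch of such a load hook is shown
+      below.  It is not taken from any in-tree driver; the foo_* helpers
+      and the foo_device_private structure are purely hypothetical
+      stand-ins for driver specific code.
+    </para>
+    <programlisting>
+<![CDATA[
+static int foo_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct foo_device_private *dev_priv;
+	int ret;
+
+	/* Allocate the driver private structure and hang it off the device */
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+	if (!dev_priv)
+		return -ENOMEM;
+	dev->dev_private = dev_priv;
+
+	/* Map registers & command buffers, then bring up core services */
+	ret = foo_map_mmio(dev);		/* hypothetical helper */
+	if (ret)
+		goto err_free;
+	ret = foo_init_memory_manager(dev);	/* hypothetical helper */
+	if (ret)
+		goto err_unmap;
+	foo_output_setup(dev);			/* hypothetical helper */
+
+	return 0;
+
+err_unmap:
+	foo_unmap_mmio(dev);
+err_free:
+	kfree(dev_priv);
+	dev->dev_private = NULL;
+	return ret;
+}
+]]>
+    </programlisting>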
+    <para>
+      Note that the tasks performed at driver load time must not
+      conflict with DRM client requirements.  For instance, if user
+      level mode setting drivers are in use, it would be problematic
+      to perform output discovery &amp; configuration at load time.
+      Likewise, if pre-memory management aware user level drivers are
+      in use, memory management and command buffer setup may need to
+      be omitted.  These requirements are driver specific, and care
+      needs to be taken to keep both old and new applications and
+      libraries working.  The i915 driver supports the "modeset"
+      module parameter to control whether advanced features are
+      enabled at load time or in legacy fashion.  If compatibility is
+      a concern (e.g. with drivers converted over to the new interfaces
+      from the old ones), care must be taken to prevent incompatible
+      device initialization and control with the currently active
+      userspace drivers.
+    </para>
+
+    <sect2>
+      <title>Driver private &amp; performance counters</title>
+      <para>
+	The driver private hangs off the main drm_device structure and
+	can be used for tracking various device specific bits of
+	information, like register offsets, command buffer status,
+	register state for suspend/resume, etc.  At load time, a
+	driver can simply allocate one and set drm_device.dev_private
+	appropriately; at unload the driver can free it and set
+	drm_device.dev_private to NULL.
+      </para>
+      <para>
+	The DRM supports several counters which can be used for rough
+	performance characterization.  Note that the DRM stat counter
+	system is not often used by applications, and supporting
+	additional counters is completely optional.
+      </para>
+      <para>
+	These interfaces are deprecated and should not be used.  If performance
+	monitoring is desired, the developer should investigate and
+	potentially enhance the kernel perf and tracing infrastructure to export
+	GPU related performance information to performance monitoring
+	tools and applications.
+      </para>
+    </sect2>
+
+    <sect2>
+      <title>Configuring the device</title>
+      <para>
+	Obviously, device configuration will be device specific.
+	However, there are several common operations: finding a
+	device's PCI resources, mapping them, and potentially setting
+	up an IRQ handler.
+      </para>
+      <para>
+	Finding &amp; mapping resources is fairly straightforward.  The
+	DRM wrapper functions drm_get_resource_start() and
+	drm_get_resource_len() can be used to find BARs on the given
+	drm_device struct.  Once those values have been retrieved, the
+	driver load function can call drm_addmap() to create a new
+	mapping for the BAR in question.  Note you'll probably want a
+	drm_local_map_t in your driver private structure to track any
+	mappings you create.
+<!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* -->
+<!-- !Finclude/drm/drmP.h drm_local_map_t -->
+      </para>
+      <para>
+	If compatibility with other operating systems isn't a concern
+	(DRM drivers can run under various BSD variants and OpenSolaris),
+	native Linux calls can be used for the above, e.g. pci_resource_*
+	and iomap*/iounmap.  See the Linux device driver book for more
+	info.
+      </para>
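+      <para>
+	As a sketch, using the plain PCI helpers mentioned above, mapping a
+	register BAR might look like the fragment below.  The use of BAR 0
+	and the mmio_* fields of the driver private structure are
+	assumptions made for illustration only.
+      </para>
+      <programlisting>
+<![CDATA[
+	struct pci_dev *pdev = dev->pdev;
+
+	/* BAR 0 is assumed to hold the register aperture on this device */
+	dev_priv->mmio_base = pci_resource_start(pdev, 0);
+	dev_priv->mmio_size = pci_resource_len(pdev, 0);
+	dev_priv->mmio_map = ioremap(dev_priv->mmio_base, dev_priv->mmio_size);
+	if (!dev_priv->mmio_map)
+		return -ENOMEM;
+]]>
+      </programlisting>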
+      <para>
+	Once you have a register map, you can use the DRM_READn() and
+	DRM_WRITEn() macros to access the registers on your device, or
+	use driver specific versions to offset into your MMIO space
+	relative to a driver specific base pointer (see I915_READ for
+	example).
+      </para>
+      <para>
+	If your device supports interrupt generation, you may want to
+	set up an interrupt handler at driver load time as well.  This
+	is done using the drm_irq_install() function.  If your device
+	supports vertical blank interrupts, it should call
+	drm_vblank_init() to initialize the core vblank handling code before
+	enabling interrupts on your device.  This ensures the vblank related
+	structures are allocated and allows the core to handle vblank events.
+      </para>
+<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install-->
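+      <para>
+	A typical sequence, sketched here for a hypothetical device with
+	two CRTCs, initializes the vblank code first and then installs the
+	IRQ handler:
+      </para>
+      <programlisting>
+	ret = drm_vblank_init(dev, 2);	/* number of CRTCs on this device */
+	if (ret)
+		return ret;
+
+	ret = drm_irq_install(dev);
+	if (ret)
+		return ret;
+      </programlisting>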
+      <para>
+	Once your interrupt handler is registered (it'll use your
+	drm_driver.irq_handler as the actual interrupt handling
+	function), you can safely enable interrupts on your device,
+	assuming any other state your interrupt handler uses is also
+	initialized.
+      </para>
+      <para>
+	Another task that may be necessary during configuration is
+	mapping the video BIOS.  On many devices, the VBIOS describes
+	device configuration, LCD panel timings (if any), and contains
+	flags indicating device state.  Mapping the BIOS can be done
+	using the pci_map_rom() call, a convenience function that
+	takes care of mapping the actual ROM, whether it has been
+	shadowed into memory (typically at address 0xc0000) or exists
+	on the PCI device in the ROM BAR.  Note that once you've
+	mapped the ROM and extracted any necessary information, be
+	sure to unmap it; on many devices the ROM address decoder is
+	shared with other BARs, so leaving it mapped can cause
+	undesired behavior like hangs or memory corruption.
+<!--!Fdrivers/pci/rom.c pci_map_rom-->
+      </para>
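+      <para>
+	A sketch of this pattern follows; the parsing step is of course
+	entirely device specific.
+      </para>
+      <programlisting>
+<![CDATA[
+	size_t vbios_size;
+	void __iomem *vbios;
+
+	vbios = pci_map_rom(dev->pdev, &vbios_size);
+	if (vbios) {
+		/* ... parse panel timings, device state flags, etc. ... */
+		pci_unmap_rom(dev->pdev, vbios);
+	}
+]]>
+      </programlisting>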
+    </sect2>
+
+    <sect2>
+      <title>Memory manager initialization</title>
+      <para>
+	In order to allocate command buffers, cursor memory, scanout
+	buffers, etc., as well as support the latest features provided
+	by packages like Mesa and the X.Org X server, your driver
+	should support a memory manager.
+      </para>
+      <para>
+	If your driver supports memory management (it should!), you'll
+	need to set that up at load time as well.  How you initialize
+	it depends on which memory manager you're using, TTM or GEM.
+      </para>
+      <sect3>
+	<title>TTM initialization</title>
+	<para>
+	  TTM (for Translation Table Manager) manages video memory and
+	  aperture space for graphics devices. TTM supports both UMA devices
+	  and devices with dedicated video RAM (VRAM), i.e. most discrete
+	  graphics devices.  If your device has dedicated RAM, supporting
+	  TTM is desirable.  TTM also integrates tightly with your
+	  driver specific buffer execution function.  See the radeon
+	  driver for examples.
+	</para>
+	<para>
+	  The core TTM structure is the ttm_bo_driver struct.  It contains
+	  several fields with function pointers for initializing the TTM,
+	  allocating and freeing memory, waiting for command completion
+	  and fence synchronization, and memory migration.  See the
+	  radeon_ttm.c file for an example of usage.
+	</para>
+	<para>
+	  The ttm_global_reference structure is made up of several fields:
+	</para>
+	<programlisting>
+	  struct ttm_global_reference {
+	  	enum ttm_global_types global_type;
+	  	size_t size;
+	  	void *object;
+	  	int (*init) (struct ttm_global_reference *);
+	  	void (*release) (struct ttm_global_reference *);
+	  };
+	</programlisting>
+	<para>
+	  There should be one global reference structure for your memory
+	  manager as a whole, and there will be others for each object
+	  created by the memory manager at runtime.  Your global TTM should
+	  have a type of TTM_GLOBAL_TTM_MEM.  The size field for the global
+	  object should be sizeof(struct ttm_mem_global), and the init and
+	  release hooks should point at your driver specific init and
+	  release routines, which will probably eventually call
+	  ttm_mem_global_init and ttm_mem_global_release respectively.
+	</para>
+	<para>
+	  Once your global TTM accounting structure is set up and initialized
+	  (done by calling ttm_global_item_ref on the global object you
+	  just created), you'll need to create a buffer object TTM to
+	  provide a pool for buffer object allocation by clients and the
+	  kernel itself.  The type of this object should be TTM_GLOBAL_TTM_BO,
+	  and its size should be sizeof(struct ttm_bo_global).  Again,
+	  driver specific init and release functions can be provided,
+	  likely eventually calling ttm_bo_global_init and
+	  ttm_bo_global_release, respectively.  Also like the previous
+	  object, ttm_global_item_ref is used to create an initial reference
+	  count for the TTM, which will call your initialization function.
+	</para>
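+	<para>
+	  The following sketch, modeled loosely on radeon_ttm.c, shows the
+	  whole sequence.  The foo_* names and the mem_global_ref and
+	  bo_global_ref fields of the driver private structure are
+	  illustrative, not part of any existing driver.
+	</para>
+	<programlisting>
+<![CDATA[
+static int foo_ttm_mem_global_init(struct ttm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void foo_ttm_mem_global_release(struct ttm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int foo_ttm_global_init(struct foo_device_private *dev_priv)
+{
+	struct ttm_global_reference *ref;
+	int ret;
+
+	/* Global memory accounting object */
+	ref = &dev_priv->mem_global_ref;
+	ref->global_type = TTM_GLOBAL_TTM_MEM;
+	ref->size = sizeof(struct ttm_mem_global);
+	ref->init = &foo_ttm_mem_global_init;
+	ref->release = &foo_ttm_mem_global_release;
+	ret = ttm_global_item_ref(ref);
+	if (ret)
+		return ret;
+
+	/* Buffer object pool; the generic init/release hooks suffice here */
+	ref = &dev_priv->bo_global_ref.ref;
+	ref->global_type = TTM_GLOBAL_TTM_BO;
+	ref->size = sizeof(struct ttm_bo_global);
+	ref->init = &ttm_bo_global_init;
+	ref->release = &ttm_bo_global_release;
+	ret = ttm_global_item_ref(ref);
+	if (ret)
+		ttm_global_item_unref(&dev_priv->mem_global_ref);
+	return ret;
+}
+]]>
+	</programlisting>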
+      </sect3>
+      <sect3>
+	<title>GEM initialization</title>
+	<para>
+	  GEM is an alternative to TTM, designed specifically for UMA
+	  devices.  It has simpler initialization and execution requirements
+	  than TTM, but has no VRAM management capability.  Core GEM
+	  initialization is comprised of a basic drm_mm_init call to create
+	  a GTT DRM MM object, which provides an address space pool for
+	  object allocation.  In a KMS configuration, the driver will
+	  need to allocate and initialize a command ring buffer following
+	  basic GEM initialization.  Most UMA devices have a so-called
+	  "stolen" memory region, which provides space for the initial
+	  framebuffer and large, contiguous memory regions required by the
+	  device.  This space is not typically managed by GEM, and must
+	  be initialized separately into its own DRM MM object.
+	</para>
+	<para>
+	  Initialization will be driver specific, and will depend on
+	  the architecture of the device.  In the case of Intel
+	  integrated graphics chips like 965GM, GEM initialization can
+	  be done by calling the internal GEM init function,
+	  i915_gem_do_init().  Since the 965GM is a UMA device
+	  (i.e. it doesn't have dedicated VRAM), GEM will manage
+	  making regular RAM available for GPU operations.  Memory set
+	  aside by the BIOS (called "stolen" memory by the i915
+	  driver) will be managed by the DRM memrange allocator; the
+	  rest of the aperture will be managed by GEM.
+	  <programlisting>
+	    /* Basic memrange allocator for stolen space (aka vram) */
+	    drm_memrange_init(&amp;dev_priv->vram, 0, prealloc_size);
+	    /* Let GEM Manage from end of prealloc space to end of aperture */
+	    i915_gem_do_init(dev, prealloc_size, agp_size);
+	  </programlisting>
+<!--!Edrivers/char/drm/drm_memrange.c-->
+	</para>
+	<para>
+	  Once the memory manager has been set up, we can allocate the
+	  command buffer.  In the i915 case, this is also done with a
+	  GEM function, i915_gem_init_ringbuffer().
+	</para>
+      </sect3>
+    </sect2>
+
+    <sect2>
+      <title>Output configuration</title>
+      <para>
+	The final initialization task is output configuration.  This involves
+	finding and initializing the CRTCs, encoders and connectors
+	for your device, creating an initial configuration and
+	registering a framebuffer console driver.
+      </para>
+      <sect3>
+	<title>Output discovery and initialization</title>
+	<para>
+	  Several core functions exist to create CRTCs, encoders and
+	  connectors, namely drm_crtc_init(), drm_connector_init() and
+	  drm_encoder_init(), along with several "helper" functions to
+	  perform common tasks.
+	</para>
+	<para>
+	  Connectors should be registered with sysfs once they've been
+	  detected and initialized, using the
+	  drm_sysfs_connector_add() function.  Likewise, when they're
+	  removed from the system, they should be destroyed with
+	  drm_sysfs_connector_remove().
+	</para>
+	<programlisting>
+<![CDATA[
+void intel_crt_init(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	struct intel_output *intel_output;
+
+	intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
+	if (!intel_output)
+		return;
+
+	connector = &intel_output->base;
+	drm_connector_init(dev, &intel_output->base,
+			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+	drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
+			 DRM_MODE_ENCODER_DAC);
+
+	drm_mode_connector_attach_encoder(&intel_output->base,
+					  &intel_output->enc);
+
+	/* Set up the DDC bus. */
+	intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
+	if (!intel_output->ddc_bus) {
+		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+			   "failed.\n");
+		return;
+	}
+
+	intel_output->type = INTEL_OUTPUT_ANALOG;
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+
+	drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
+	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+
+	drm_sysfs_connector_add(connector);
+}
+]]>
+	</programlisting>
+	<para>
+	  In the example above (again, taken from the i915 driver), a
+	  CRT connector and encoder combination is created.  A device
+	  specific i2c bus is also created, for fetching EDID data and
+	  performing monitor detection.  Once the process is complete,
+	  the new connector is registered with sysfs, to make its
+	  properties available to applications.
+	</para>
+	<sect4>
+	  <title>Helper functions and core functions</title>
+	  <para>
+	    Since many PC-class graphics devices have similar display output
+	    designs, the DRM provides a set of helper functions to make
+	    output management easier.  The core helper routines handle
+	    encoder re-routing and disabling of unused functions following
+	    mode set.  Using the helpers is optional, but recommended for
+	    devices with PC-style architectures (i.e. a set of display planes
+	    for feeding pixels to encoders which are in turn routed to
+	    connectors).  Devices with more complex requirements needing
+	    finer grained management can opt to use the core callbacks
+	    directly.
+	  </para>
+	  <para>
+	    [Insert typical diagram here.]  [Insert OMAP style config here.]
+	  </para>
+	</sect4>
+	<para>
+	  For each encoder, CRTC and connector, several functions must
+	  be provided, depending on the object type.  Encoder objects
+	  should provide a DPMS (basically on/off) function, mode fixup
+	  (for converting requested modes into native hardware timings),
+	  and prepare, set and commit functions for use by the core DRM
+	  helper functions.  Connector helpers need to provide mode fetch and
+	  validity functions as well as an encoder matching function for
+	  returning an ideal encoder for a given connector.  The core
+	  connector functions include a DPMS callback, (deprecated)
+	  save/restore routines, detection, mode probing, property handling,
+	  and cleanup functions.
+	</para>
+<!--!Edrivers/char/drm/drm_crtc.h-->
+<!--!Edrivers/char/drm/drm_crtc.c-->
+<!--!Edrivers/char/drm/drm_crtc_helper.c-->
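+	<para>
+	  A sketch of the helper function tables a driver might register is
+	  shown below; the foo_* callbacks are hypothetical driver specific
+	  implementations of the operations described above.
+	</para>
+	<programlisting>
+<![CDATA[
+static const struct drm_encoder_helper_funcs foo_encoder_helper_funcs = {
+	.dpms = foo_encoder_dpms,		/* on/off */
+	.mode_fixup = foo_encoder_mode_fixup,	/* requested -> native timings */
+	.prepare = foo_encoder_prepare,
+	.mode_set = foo_encoder_mode_set,
+	.commit = foo_encoder_commit,
+};
+
+static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
+	.get_modes = foo_connector_get_modes,	/* mode fetch */
+	.mode_valid = foo_connector_mode_valid,	/* mode validity */
+	.best_encoder = foo_connector_best_encoder, /* encoder matching */
+};
+]]>
+	</programlisting>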
+      </sect3>
+    </sect2>
+  </sect1>
+
+  <!-- Internals: vblank handling -->
+
+  <sect1>
+    <title>VBlank event handling</title>
+    <para>
+      The DRM core exposes two vertical blank related ioctls:
+      DRM_IOCTL_WAIT_VBLANK and DRM_IOCTL_MODESET_CTL.
+<!--!Edrivers/char/drm/drm_irq.c-->
+    </para>
+    <para>
+      DRM_IOCTL_WAIT_VBLANK takes a struct drm_wait_vblank structure
+      as its argument, and is used to block or request a signal when a
+      specified vblank event occurs.
+    </para>
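+    <para>
+      From userspace this ioctl is normally reached through the libdrm
+      wrapper drmWaitVBlank().  A minimal sketch of waiting for the next
+      vblank, assuming fd is an already open DRM file descriptor, might
+      look like this:
+    </para>
+    <programlisting>
+<![CDATA[
+	drmVBlank vbl;
+
+	memset(&vbl, 0, sizeof(vbl));
+	vbl.request.type = DRM_VBLANK_RELATIVE;
+	vbl.request.sequence = 1;	/* wait for the next vblank */
+	ret = drmWaitVBlank(fd, &vbl);
+]]>
+    </programlisting>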
+    <para>
+      DRM_IOCTL_MODESET_CTL should be called by application level
+      drivers before and after mode setting, since on many devices the
+      vertical blank counter will be reset at that time.  Internally,
+      the DRM snapshots the last vblank count when the ioctl is called
+      with the _DRM_PRE_MODESET command so that the counter won't go
+      backwards (which is dealt with when _DRM_POST_MODESET is used).
+    </para>
+    <para>
+      To support the functions above, the DRM core provides several
+      helper functions for tracking vertical blank counters, and
+      requires drivers to provide several callbacks:
+      get_vblank_counter(), enable_vblank() and disable_vblank().  The
+      core uses get_vblank_counter() to keep the counter accurate
+      across interrupt disable periods.  It should return the current
+      vertical blank event count, which is often tracked in a device
+      register.  The enable and disable vblank callbacks should enable
+      and disable vertical blank interrupts, respectively.  In the
+      absence of DRM clients waiting on vblank events, the core DRM
+      code will use the disable_vblank() function to disable
+      interrupts, which saves power.  They'll be re-enabled when
+      a client calls the vblank wait ioctl above.
+    </para>
+    <para>
+      Devices that don't provide a count register can simply use an
+      internal atomic counter incremented on every vertical blank
+      interrupt, and can make their enable and disable vblank
+      functions into no-ops.
+    </para>
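+    <para>
+      A sketch of what such no-op callbacks might look like, with a single
+      software counter (a real driver would typically keep one per CRTC),
+      is shown below; the foo_* names are illustrative.
+    </para>
+    <programlisting>
+<![CDATA[
+static atomic_t foo_vblank_count = ATOMIC_INIT(0);
+
+static u32 foo_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	return atomic_read(&foo_vblank_count);
+}
+
+static int foo_enable_vblank(struct drm_device *dev, int crtc)
+{
+	return 0;	/* vblank interrupts are always on for this device */
+}
+
+static void foo_disable_vblank(struct drm_device *dev, int crtc)
+{
+	/* nothing to do */
+}
+]]>
+    </programlisting>
+    <para>
+      The driver's interrupt handler would then increment the counter with
+      atomic_inc() and call drm_handle_vblank() on each vertical blank
+      interrupt.
+    </para>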
+  </sect1>
+
+  <sect1>
+    <title>Memory management</title>
+    <para>
+      The memory manager lies at the heart of many DRM operations, and
+      is also required to support advanced client features like OpenGL
+      pbuffers.  The DRM currently contains two memory managers, TTM
+      and GEM.
+    </para>
+
+    <sect2>
+      <title>The Translation Table Manager (TTM)</title>
+      <para>
+	TTM was developed by Tungsten Graphics, primarily by Thomas
+	Hellström, and is intended to be a flexible, high performance
+	graphics memory manager.
+      </para>
+      <para>
+	Drivers wishing to support TTM must fill out a drm_bo_driver
+	structure.
+      </para>
+      <para>
+	TTM design background and information belongs here.
+      </para>
+    </sect2>
+
+    <sect2>
+      <title>The Graphics Execution Manager (GEM)</title>
+      <para>
+	GEM is an Intel project, authored by Eric Anholt and Keith
+	Packard.  It provides simpler interfaces than TTM, and is well
+	suited for UMA devices.
+      </para>
+      <para>
+	GEM-enabled drivers must provide gem_init_object() and
+	gem_free_object() callbacks to support the core memory
+	allocation routines.  They should also provide several driver
+	specific ioctls to support command execution, pinning, buffer
+	read &amp; write, mapping, and domain ownership transfers.
+      </para>
+      <para>
+	On a fundamental level, GEM involves several operations: memory
+	allocation and freeing, command execution, and aperture management
+	at command execution time.  Buffer object allocation is relatively
+	straightforward and largely provided by Linux's shmem layer, which
+	provides memory to back each object.  When mapped into the GTT
+	or used in a command buffer, the backing pages for an object are
+	flushed to memory and marked write combined so as to be coherent
+	with the GPU.  Likewise, when the GPU finishes rendering to an object,
+	if the CPU accesses it, it must be made coherent with the CPU's view
+	of memory, usually involving GPU cache flushing of various kinds.
+	This core CPU&lt;-&gt;GPU coherency management is provided by the GEM
+	set domain function, which evaluates an object's current domain and
+	performs any necessary flushing or synchronization to put the object
+	into the desired coherency domain (note that the object may be busy,
+	i.e. an active render target; in that case the set domain function
+	will block the client and wait for rendering to complete before
+	performing any necessary flushing operations).
+      </para>
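+      <para>
+	As an illustration of the i915 case, a client that wants to read an
+	object back with the CPU moves it to the CPU domain through the
+	driver's set-domain ioctl.  The sketch below assumes fd is an open
+	DRM file descriptor and bo_handle an existing GEM object handle;
+	error handling is omitted.
+      </para>
+      <programlisting>
+<![CDATA[
+	struct drm_i915_gem_set_domain sd;
+
+	memset(&sd, 0, sizeof(sd));
+	sd.handle = bo_handle;
+	sd.read_domains = I915_GEM_DOMAIN_CPU;
+	sd.write_domain = I915_GEM_DOMAIN_CPU;
+	ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
+]]>
+      </programlisting>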
+      <para>
+	Perhaps the most important GEM function is providing a command
+	execution interface to clients.  Client programs construct command
+	buffers containing references to previously allocated memory objects
+	and submit them to GEM.  At that point, GEM will take care to bind
+	all the objects into the GTT, execute the buffer, and provide
+	necessary synchronization between clients accessing the same buffers.
+	This often involves evicting some objects from the GTT and re-binding
+	others (a fairly expensive operation), and providing relocation
+	support which hides fixed GTT offsets from clients.  Clients must
+	take care not to submit command buffers that reference more objects
+	than can fit in the GTT or GEM will reject them and no rendering
+	will occur.  Similarly, if several objects in the buffer require
+	fence registers to be allocated for correct rendering (e.g. 2D blits
+	on pre-965 chips), care must be taken not to require more fence
+	registers than are available to the client.  Such resource management
+	should be abstracted from the client in libdrm.
+      </para>
+    </sect2>
+
+  </sect1>
+
+  <!-- Output management -->
+  <sect1>
+    <title>Output management</title>
+    <para>
+      At the core of the DRM output management code is a set of
+      structures representing CRTCs, encoders and connectors.
+    </para>
+    <para>
+      A CRTC is an abstraction representing a part of the chip that
+      contains a pointer to a scanout buffer.  Therefore, the number
+      of CRTCs available determines how many independent scanout
+      buffers can be active at any given time.  The CRTC structure
+      contains several fields to support this: a pointer to some video
+      memory, a display mode, and an (x, y) offset into the video
+      memory to support panning or configurations where one piece of
+      video memory spans multiple CRTCs.
+    </para>
+    <para>
+      An encoder takes pixel data from a CRTC and converts it to a
+      format suitable for any attached connectors.  On some devices,
+      it may be possible to have a CRTC send data to more than one
+      encoder.  In that case, both encoders would receive data from
+      the same scanout buffer, resulting in a "cloned" display
+      configuration across the connectors attached to each encoder.
+    </para>
+    <para>
+      A connector is the final destination for pixel data on a device,
+      and usually connects directly to an external display device like
+      a monitor or laptop panel.  A connector can only be attached to
+      one encoder at a time.  The connector is also the structure
+      where information about the attached display is kept, so it
+      contains fields for display data, EDID data, DPMS &amp;
+      connection status, and information about modes supported on the
+      attached displays.
+    </para>
+<!--!Edrivers/char/drm/drm_crtc.c-->
+  </sect1>
+
+  <sect1>
+    <title>Framebuffer management</title>
+    <para>
+      In order to set a mode on a given CRTC, encoder and connector
+      configuration, clients need to provide a framebuffer object which
+      will provide a source of pixels for the CRTC to deliver to the encoder(s)
+      and ultimately the connector(s) in the configuration.  A framebuffer
+      is fundamentally a driver specific memory object, made into an opaque
+      handle by the DRM addfb function.  Once an fb has been created this
+      way it can be passed to the KMS mode setting routines for use in
+      a configuration.
+    </para>
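+    <para>
+      From userspace, the addfb step is usually reached through the libdrm
+      wrapper drmModeAddFB().  In the sketch below, fd, width, height,
+      pitch, bo_handle, crtc_id, connector_id and mode are all assumed to
+      come from earlier allocation and mode enumeration calls.
+    </para>
+    <programlisting>
+<![CDATA[
+	uint32_t fb_id;
+
+	/* wrap the driver specific buffer object in an fb handle */
+	ret = drmModeAddFB(fd, width, height, 24, 32, pitch, bo_handle, &fb_id);
+	if (ret == 0)
+		ret = drmModeSetCrtc(fd, crtc_id, fb_id, 0, 0,
+				     &connector_id, 1, &mode);
+]]>
+    </programlisting>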
+  </sect1>
+
+  <sect1>
+    <title>Command submission &amp; fencing</title>
+    <para>
+      This should cover a few device specific command submission
+      implementations.
+    </para>
+  </sect1>
+
+  <sect1>
+    <title>Suspend/resume</title>
+    <para>
+      The DRM core provides some suspend/resume code, but drivers
+      wanting full suspend/resume support should provide save() and
+      restore() functions.  These will be called at suspend,
+      hibernate, or resume time, and should perform any state save or
+      restore required by your device across suspend or hibernate
+      states.
+    </para>
+  </sect1>
+
+  <sect1>
+    <title>DMA services</title>
+    <para>
+      This should cover how DMA mapping etc. is supported by the core.
+      These functions are deprecated and should not be used.
+    </para>
+  </sect1>
+  </chapter>
+
+  <!-- External interfaces -->
+
+  <chapter id="drmExternals">
+    <title>Userland interfaces</title>
+    <para>
+      The DRM core exports several interfaces to applications,
+      generally intended to be used through corresponding libdrm
+      wrapper functions.  In addition, drivers export device specific
+      interfaces for use by userspace drivers &amp; device aware
+      applications through ioctls and sysfs files.
+    </para>
+    <para>
+      External interfaces include: memory mapping, context management,
+      DMA operations, AGP management, vblank control, fence
+      management, memory management, and output management.
+    </para>
+    <para>
+      Cover generic ioctls and sysfs layout here.  Only need high
+      level info, since man pages will cover the rest.
+    </para>
+  </chapter>
+
+  <!-- API reference -->
+
+  <appendix id="drmDriverApi">
+    <title>DRM Driver API</title>
+    <para>
+      Include auto-generated API reference here (need to reference it
+      from paragraphs above too).
+    </para>
+  </appendix>
+
+</book>
diff --git a/Documentation/DocBook/kgdb.tmpl b/Documentation/DocBook/kgdb.tmpl
index 5cff41a..55f12ac 100644
--- a/Documentation/DocBook/kgdb.tmpl
+++ b/Documentation/DocBook/kgdb.tmpl
@@ -4,7 +4,7 @@
 
 <book id="kgdbOnLinux">
  <bookinfo>
-  <title>Using kgdb and the kgdb Internals</title>
+  <title>Using kgdb, kdb and the kernel debugger internals</title>
 
   <authorgroup>
    <author>
@@ -17,33 +17,8 @@
     </affiliation>
    </author>
   </authorgroup>
-
-  <authorgroup>
-   <author>
-    <firstname>Tom</firstname>
-    <surname>Rini</surname>
-    <affiliation>
-     <address>
-      <email>trini@kernel.crashing.org</email>
-     </address>
-    </affiliation>
-   </author>
-  </authorgroup>
-
-  <authorgroup>
-   <author>
-    <firstname>Amit S.</firstname>
-    <surname>Kale</surname>
-    <affiliation>
-     <address>
-      <email>amitkale@linsyssoft.com</email>
-     </address>
-    </affiliation>
-   </author>
-  </authorgroup>
-
   <copyright>
-   <year>2008</year>
+   <year>2008,2010</year>
    <holder>Wind River Systems, Inc.</holder>
   </copyright>
   <copyright>
@@ -69,41 +44,76 @@
   <chapter id="Introduction">
     <title>Introduction</title>
     <para>
-    kgdb is a source level debugger for linux kernel. It is used along
-    with gdb to debug a linux kernel.  The expectation is that gdb can
-    be used to "break in" to the kernel to inspect memory, variables
-    and look through call stack information similar to what an
-    application developer would use gdb for.  It is possible to place
-    breakpoints in kernel code and perform some limited execution
-    stepping.
+    The kernel has two different debugger front ends (kdb and kgdb)
+    which interface to the debug core.  It is possible to use either
+    of the debugger front ends and dynamically transition between them
+    if you configure the kernel properly at compile and runtime.
     </para>
     <para>
-    Two machines are required for using kgdb. One of these machines is a
-    development machine and the other is a test machine.  The kernel
-    to be debugged runs on the test machine. The development machine
-    runs an instance of gdb against the vmlinux file which contains
-    the symbols (not boot image such as bzImage, zImage, uImage...).
-    In gdb the developer specifies the connection parameters and
-    connects to kgdb.  The type of connection a developer makes with
-    gdb depends on the availability of kgdb I/O modules compiled as
-    builtin's or kernel modules in the test machine's kernel.
+    Kdb is a simplistic shell-style interface which you can use on a
+    system console with a keyboard or serial console.  You can use it
+    to inspect memory, registers, process lists, dmesg, and even set
+    breakpoints to stop in a certain location.  Kdb is not a source
+    level debugger, although you can set breakpoints and execute some
+    basic kernel run control.  Kdb is mainly aimed at doing some
+    analysis to aid in development or diagnosing kernel problems.  You
+    can access some symbols by name in kernel built-ins or in kernel
+    modules if the code was built
+    with <symbol>CONFIG_KALLSYMS</symbol>.
+    </para>
+    <para>
+    Kgdb is intended to be used as a source level debugger for the
+    Linux kernel. It is used along with gdb to debug a Linux kernel.
+    The expectation is that gdb can be used to "break in" to the
+    kernel to inspect memory, variables and look through call stack
+    information similar to the way an application developer would use
+    gdb to debug an application.  It is possible to place breakpoints
+    in kernel code and perform some limited execution stepping.
+    </para>
+    <para>
+    Two machines are required for using kgdb. One of these machines is
+    a development machine and the other is the target machine.  The
+    kernel to be debugged runs on the target machine. The development
+    machine runs an instance of gdb against the vmlinux file which
+    contains the symbols (not boot image such as bzImage, zImage,
+    uImage...).  In gdb the developer specifies the connection
+    parameters and connects to kgdb.  The type of connection a
+    developer makes with gdb depends on the availability of kgdb I/O
+    modules compiled as built-ins or loadable kernel modules in the test
+    machine's kernel.
     </para>
   </chapter>
   <chapter id="CompilingAKernel">
-    <title>Compiling a kernel</title>
+  <title>Compiling a kernel</title>
+  <para>
+  <itemizedlist>
+  <listitem><para>In order to enable compilation of kdb, you must first enable kgdb.</para></listitem>
+  <listitem><para>The kgdb test compile options are described in the kgdb test suite chapter.</para></listitem>
+  </itemizedlist>
+  </para>
+  <sect1 id="CompileKGDB">
+    <title>Kernel config options for kgdb</title>
     <para>
     To enable <symbol>CONFIG_KGDB</symbol> you should first turn on
     "Prompt for development and/or incomplete code/drivers"
     (CONFIG_EXPERIMENTAL) in  "General setup", then under the
-    "Kernel debugging" select "KGDB: kernel debugging with remote gdb".
+    "Kernel debugging" select "KGDB: kernel debugger".
+    </para>
+    <para>
+    While it is not a hard requirement that you have symbols in your
+    vmlinux file, gdb tends not to be very useful without the symbolic
+    data, so you will want to turn
+    on <symbol>CONFIG_DEBUG_INFO</symbol> which is called "Compile the
+    kernel with debug info" in the config menu.
     </para>
     <para>
     It is advised, but not required that you turn on the
-    CONFIG_FRAME_POINTER kernel option.  This option inserts code to
-    into the compiled executable which saves the frame information in
-    registers or on the stack at different points which will allow a
-    debugger such as gdb to more accurately construct stack back traces
-    while debugging the kernel.
+    <symbol>CONFIG_FRAME_POINTER</symbol> kernel option which is called "Compile the
+    kernel with frame pointers" in the config menu.  This option
+    inserts code into the compiled executable which saves the frame
+    information in registers or on the stack at different points which
+    allows a debugger such as gdb to more accurately construct
+    stack back traces while debugging the kernel.
     </para>
     <para>
     If the architecture that you are using supports the kernel option
@@ -116,38 +126,160 @@
     this option.
     </para>
     <para>
-    Next you should choose one of more I/O drivers to interconnect debugging
-    host and debugged target.  Early boot debugging requires a KGDB
-    I/O driver that supports early debugging and the driver must be
-    built into the kernel directly. Kgdb I/O driver configuration
-    takes place via kernel or module parameters, see following
-    chapter.
+    Next you should choose one or more I/O drivers to interconnect
+    debugging host and debugged target.  Early boot debugging requires
+    a KGDB I/O driver that supports early debugging and the driver
+    must be built into the kernel directly. Kgdb I/O driver
+    configuration takes place via kernel or module parameters which
+    you can learn more about in the section that describes the
+    parameter "kgdboc".
     </para>
-    <para>
-    The kgdb test compile options are described in the kgdb test suite chapter.
+    <para>Here is an example set of .config symbols to enable or
+    disable for kgdb:
+    <itemizedlist>
+    <listitem><para># CONFIG_DEBUG_RODATA is not set</para></listitem>
+    <listitem><para>CONFIG_FRAME_POINTER=y</para></listitem>
+    <listitem><para>CONFIG_KGDB=y</para></listitem>
+    <listitem><para>CONFIG_KGDB_SERIAL_CONSOLE=y</para></listitem>
+    </itemizedlist>
     </para>
-
+  </sect1>
+  <sect1 id="CompileKDB">
+    <title>Kernel config options for kdb</title>
+    <para>Kdb is quite a bit more complex than the simple gdbstub
+    sitting on top of the kernel's debug core.  Kdb must implement a
+    shell, and also adds some helper functions in other parts of the
+    kernel, responsible for printing out interesting data such as what
+    you would see if you ran "lsmod" or "ps".  In order to build kdb
+    into the kernel you follow the same steps as you would for kgdb.
+    </para>
+    <para>The main config option for kdb
+    is <symbol>CONFIG_KGDB_KDB</symbol> which is called "KGDB_KDB:
+    include kdb frontend for kgdb" in the config menu.  In theory you
+    would have already also selected an I/O driver such as the
+    CONFIG_KGDB_SERIAL_CONSOLE interface if you plan on using kdb on a
+    serial port, when you were configuring kgdb.
+    </para>
+    <para>If you want to use a PS/2-style keyboard with kdb, you would
+    select CONFIG_KDB_KEYBOARD which is called "KGDB_KDB: keyboard as
+    input device" in the config menu.  The CONFIG_KDB_KEYBOARD option
+    is not used for anything in the gdb interface to kgdb.  The
+    CONFIG_KDB_KEYBOARD option only works with kdb.
+    </para>
+    <para>Here is an example set of .config symbols to enable/disable kdb:
+    <itemizedlist>
+    <listitem><para># CONFIG_DEBUG_RODATA is not set</para></listitem>
+    <listitem><para>CONFIG_FRAME_POINTER=y</para></listitem>
+    <listitem><para>CONFIG_KGDB=y</para></listitem>
+    <listitem><para>CONFIG_KGDB_SERIAL_CONSOLE=y</para></listitem>
+    <listitem><para>CONFIG_KGDB_KDB=y</para></listitem>
+    <listitem><para>CONFIG_KDB_KEYBOARD=y</para></listitem>
+    </itemizedlist>
+    </para>
+  </sect1>
   </chapter>
-  <chapter id="EnableKGDB">
-   <title>Enable kgdb for debugging</title>
-   <para>
-   In order to use kgdb you must activate it by passing configuration
-   information to one of the kgdb I/O drivers.  If you do not pass any
-   configuration information kgdb will not do anything at all.  Kgdb
-   will only actively hook up to the kernel trap hooks if a kgdb I/O
-   driver is loaded and configured.  If you unconfigure a kgdb I/O
-   driver, kgdb will unregister all the kernel hook points.
+  <chapter id="kgdbKernelArgs">
+  <title>Kernel Debugger Boot Arguments</title>
+  <para>This section describes the various runtime kernel
+  parameters that affect the configuration of the kernel debugger.
+  The following chapter covers using kdb and kgdb as well as
+  provides some examples of the configuration parameters.</para>
+   <sect1 id="kgdboc">
+   <title>Kernel parameter: kgdboc</title>
+   <para>The name of the kgdboc driver was originally an abbreviation
+   meant to stand for "kgdb over console".  Today it is the primary mechanism
+   to configure how to communicate from gdb to kgdb as well as the
+   devices you want to use to interact with the kdb shell.
    </para>
-   <para>
-   All drivers can be reconfigured at run time, if
-   <symbol>CONFIG_SYSFS</symbol> and <symbol>CONFIG_MODULES</symbol>
-   are enabled, by echo'ing a new config string to
-   <constant>/sys/module/&lt;driver&gt;/parameter/&lt;option&gt;</constant>.
-   The driver can be unconfigured by passing an empty string.  You cannot
-   change the configuration while the debugger is attached.  Make sure
-   to detach the debugger with the <constant>detach</constant> command
-   prior to trying unconfigure a kgdb I/O driver.
+   <para>For kgdb/gdb, kgdboc is designed to work with a single serial
+   port. It is intended to cover the circumstance where you want to
+   use a serial console as your primary console as well as using it to
+   perform kernel debugging.  It is also possible to use kgdb on a
+   serial port which is not designated as a system console.  Kgdboc
+   may be configured as a kernel built-in or a kernel loadable module.
+   You can only make use of <constant>kgdbwait</constant> and early
+   debugging if you build kgdboc into the kernel as a built-in.
    </para>
+   <sect2 id="kgdbocArgs">
+   <title>kgdboc arguments</title>
+   <para>Usage: <constant>kgdboc=[kbd][[,]serial_device][,baud]</constant></para>
+   <sect3 id="kgdbocArgs1">
+   <title>Using loadable module or built-in</title>
+   <para>
+   <orderedlist>
+   <listitem><para>As a kernel built-in:</para>
+   <para>Use the kernel boot argument: <constant>kgdboc=&lt;tty-device&gt;,[baud]</constant></para></listitem>
+   <listitem>
+   <para>As a kernel loadable module:</para>
+   <para>Use the command: <constant>modprobe kgdboc kgdboc=&lt;tty-device&gt;,[baud]</constant></para>
+   <para>Here are two examples of how you might format the kgdboc
+   string. The first is for an x86 target using the first serial port.
+   The second example is for the ARM Versatile AB using the second
+   serial port.
+   <orderedlist>
+   <listitem><para><constant>kgdboc=ttyS0,115200</constant></para></listitem>
+   <listitem><para><constant>kgdboc=ttyAMA1,115200</constant></para></listitem>
+   </orderedlist>
+   </para>
+   </listitem>
+   </orderedlist></para>
+   </sect3>
+   <sect3 id="kgdbocArgs2">
+   <title>Configure kgdboc at runtime with sysfs</title>
+   <para>At run time you can enable or disable kgdboc by echoing
+   parameters into sysfs.  Here are two examples:</para>
+   <orderedlist>
+   <listitem><para>Enable kgdboc on ttyS0</para>
+   <para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+   <listitem><para>Disable kgdboc</para>
+   <para><constant>echo "" &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+   </orderedlist>
+   <para>NOTE: You do not need to specify the baud if you are
+   configuring kgdboc on a tty that is already configured or open
+   as a console.</para>
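+   <para>You can also read the current setting back, for example:</para>
+   <para><constant>cat /sys/module/kgdboc/parameters/kgdboc</constant></para>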
+   </sect3>
+   <sect3 id="kgdbocArgs3">
+   <title>More examples</title>
+   <para>You can configure kgdboc to use a keyboard, a serial device,
+   or both, depending on whether you are using kdb, kgdb, or both, in
+   one of the following scenarios.
+   <orderedlist>
+   <listitem><para>kdb and kgdb over only a serial port</para>
+   <para><constant>kgdboc=&lt;serial_device&gt;[,baud]</constant></para>
+   <para>Example: <constant>kgdboc=ttyS0,115200</constant></para>
+   </listitem>
+   <listitem><para>kdb and kgdb with keyboard and a serial port</para>
+   <para><constant>kgdboc=kbd,&lt;serial_device&gt;[,baud]</constant></para>
+   <para>Example: <constant>kgdboc=kbd,ttyS0,115200</constant></para>
+   </listitem>
+   <listitem><para>kdb with a keyboard</para>
+   <para><constant>kgdboc=kbd</constant></para>
+   </listitem>
+   </orderedlist>
+   </para>
+   </sect3>
+   <para>NOTE: Kgdboc does not support interrupting the target via the
+   gdb remote protocol.  You must manually send a sysrq-g unless you
+   have a proxy that splits console output to a terminal program.
+   A console proxy has a separate TCP port for the debugger and a separate
+   TCP port for the "human" console.  The proxy can take care of sending
+   the sysrq-g for you.
+   </para>
+   <para>When using kgdboc with no debugger proxy, you can end up
+    connecting the debugger at one of two entry points.  If an
+    exception occurs after you have loaded kgdboc, a message should
+    print on the console stating it is waiting for the debugger.  In
+    this case you disconnect your terminal program and then connect the
+    debugger in its place.  If you want to interrupt the target system
+    and forcibly enter a debug session you have to issue a Sysrq
+    sequence and then type the letter <constant>g</constant>.  Then
+    you disconnect the terminal session and connect gdb.  If you don't
+    like this, your options are to hack gdb to send the sysrq-g for you
+    on the initial connect as well, or to use a debugger proxy that
+    allows an unmodified gdb to do the debugging.
+   </para>
+   </sect2>
+   </sect1>
    <sect1 id="kgdbwait">
    <title>Kernel parameter: kgdbwait</title>
    <para>
@@ -162,103 +294,204 @@
    </para>
    <para>
    The kernel will stop and wait as early as the I/O driver and
-   architecture will allow when you use this option.  If you build the
-   kgdb I/O driver as a kernel module kgdbwait will not do anything.
+   architecture allows when you use this option.  If you build the
+   kgdb I/O driver as a loadable kernel module, kgdbwait will not do
+   anything.
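+   For example, a boot line such as
+   <constant>kgdboc=ttyS0,115200 kgdbwait</constant> (serial device and
+   baud are illustrative) stops the kernel early in boot and waits for
+   the debugger to attach.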
    </para>
    </sect1>
-  <sect1 id="kgdboc">
-  <title>Kernel parameter: kgdboc</title>
-  <para>
-  The kgdboc driver was originally an abbreviation meant to stand for
-  "kgdb over console".  Kgdboc is designed to work with a single
-  serial port. It was meant to cover the circumstance
-  where you wanted to use a serial console as your primary console as
-  well as using it to perform kernel debugging.  Of course you can
-  also use kgdboc without assigning a console to the same port.
-  </para>
-  <sect2 id="UsingKgdboc">
-  <title>Using kgdboc</title>
-  <para>
-  You can configure kgdboc via sysfs or a module or kernel boot line
-  parameter depending on if you build with CONFIG_KGDBOC as a module
-  or built-in.
-  <orderedlist>
-  <listitem><para>From the module load or build-in</para>
-  <para><constant>kgdboc=&lt;tty-device&gt;,[baud]</constant></para>
-  <para>
-  The example here would be if your console port was typically ttyS0, you would use something like <constant>kgdboc=ttyS0,115200</constant> or on the ARM Versatile AB you would likely use <constant>kgdboc=ttyAMA0,115200</constant>
-  </para>
-  </listitem>
-  <listitem><para>From sysfs</para>
-  <para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para>
-  </listitem>
-  </orderedlist>
-  </para>
-  <para>
-  NOTE: Kgdboc does not support interrupting the target via the
-  gdb remote protocol.  You must manually send a sysrq-g unless you
-  have a proxy that splits console output to a terminal problem and
-  has a separate port for the debugger to connect to that sends the
-  sysrq-g for you.
-  </para>
-  <para>When using kgdboc with no debugger proxy, you can end up
-  connecting the debugger for one of two entry points.  If an
-  exception occurs after you have loaded kgdboc a message should print
-  on the console stating it is waiting for the debugger.  In case you
-  disconnect your terminal program and then connect the debugger in
-  its place.  If you want to interrupt the target system and forcibly
-  enter a debug session you have to issue a Sysrq sequence and then
-  type the letter <constant>g</constant>.  Then you disconnect the
-  terminal session and connect gdb.  Your options if you don't like
-  this are to hack gdb to send the sysrq-g for you as well as on the
-  initial connect, or to use a debugger proxy that allows an
-  unmodified gdb to do the debugging.
-  </para>
-  </sect2>
-  </sect1>
-  <sect1 id="kgdbcon">
-  <title>Kernel parameter: kgdbcon</title>
-  <para>
-  Kgdb supports using the gdb serial protocol to send console messages
-  to the debugger when the debugger is connected and running.  There
-  are two ways to activate this feature.
-  <orderedlist>
-  <listitem><para>Activate with the kernel command line option:</para>
-  <para><constant>kgdbcon</constant></para>
-  </listitem>
-  <listitem><para>Use sysfs before configuring an io driver</para>
-  <para>
-  <constant>echo 1 &gt; /sys/module/kgdb/parameters/kgdb_use_con</constant>
-  </para>
-  <para>
-  NOTE: If you do this after you configure the kgdb I/O driver, the
-  setting will not take effect until the next point the I/O is
-  reconfigured.
-  </para>
-  </listitem>
-  </orderedlist>
-  </para>
-  <para>
-  IMPORTANT NOTE: Using this option with kgdb over the console
-  (kgdboc) is not supported.
+   <sect1 id="kgdbcon">
+   <title>Kernel parameter: kgdbcon</title>
+   <para>The kgdbcon feature allows you to see printk() messages
+   inside gdb while gdb is connected to the kernel.  Kdb does not make
+   use of the kgdbcon feature.
+   </para>
+   <para>Kgdb supports using the gdb serial protocol to send console
+   messages to the debugger when the debugger is connected and running.
+   There are two ways to activate this feature.
+   <orderedlist>
+   <listitem><para>Activate with the kernel command line option:</para>
+   <para><constant>kgdbcon</constant></para>
+   </listitem>
+   <listitem><para>Use sysfs before configuring an I/O driver</para>
+   <para>
+   <constant>echo 1 &gt; /sys/module/kgdb/parameters/kgdb_use_con</constant>
+   </para>
+   <para>
+   NOTE: If you do this after you configure the kgdb I/O driver, the
+   setting will not take effect until the next point the I/O is
+   reconfigured.
+   </para>
+   </listitem>
+   </orderedlist>
+   <para>IMPORTANT NOTE: You cannot use kgdboc + kgdbcon on a tty that is an
+   active system console.  An example of incorrect usage is
+   <constant>console=ttyS0,115200 kgdboc=ttyS0 kgdbcon</constant>
+   </para>
+   <para>It is possible to use this option with kgdboc on a tty that is not a system console.
+   </para>
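+   <para>For example, assuming the system console is on the VGA text
+   console and the debug port is ttyS0 (device names are illustrative),
+   a valid combination would be:
+   <constant>console=tty0 kgdboc=ttyS0,115200 kgdbcon</constant>
+   </para>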
   </para>
   </sect1>
   </chapter>
-  <chapter id="ConnectingGDB">
-  <title>Connecting gdb</title>
+  <chapter id="usingKDB">
+  <title>Using kdb</title>
+  <para>
+  </para>
+  <sect1 id="quickKDBserial">
+  <title>Quick start for kdb on a serial port</title>
+  <para>This is a quick example of how to use kdb.</para>
+  <para><orderedlist>
+  <listitem><para>Boot kernel with arguments:
+  <itemizedlist>
+  <listitem><para><constant>console=ttyS0,115200 kgdboc=ttyS0,115200</constant></para></listitem>
+  </itemizedlist></para>
+  <para>OR</para>
+  <para>Configure kgdboc after the kernel has booted, assuming you are using a serial port console:
+  <itemizedlist>
+  <listitem><para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+  </itemizedlist>
+  </para>
+  </listitem>
+  <listitem><para>Enter the kernel debugger manually or by waiting for an oops or fault.  There are several ways you can enter the kernel debugger manually; all involve using sysrq-g, which means you must have enabled CONFIG_MAGIC_SYSRQ=y in your kernel config.</para>
+  <itemizedlist>
+  <listitem><para>When logged in as root or with a super user session you can run:</para>
+   <para><constant>echo g &gt; /proc/sysrq-trigger</constant></para></listitem>
+  <listitem><para>Example using minicom 2.2</para>
+  <para>Press: <constant>Control-a</constant></para>
+  <para>Press: <constant>f</constant></para>
+  <para>Press: <constant>g</constant></para>
+  </listitem>
+  <listitem><para>When you have telneted to a terminal server that supports sending a remote break</para>
+  <para>Press: <constant>Control-]</constant></para>
+  <para>Type in: <constant>send break</constant></para>
+  <para>Press: <constant>Enter</constant></para>
+  <para>Press: <constant>g</constant></para>
+  </listitem>
+  </itemizedlist>
+  </listitem>
+  <listitem><para>From the kdb prompt you can run the "help" command to see a complete list of the commands that are available.</para>
+  <para>Some useful commands in kdb include:
+  <itemizedlist>
+  <listitem><para>lsmod  -- Shows where kernel modules are loaded</para></listitem>
+  <listitem><para>ps -- Displays only the active processes</para></listitem>
+  <listitem><para>ps A -- Shows all the processes</para></listitem>
+  <listitem><para>summary -- Shows kernel version info and memory usage</para></listitem>
+  <listitem><para>bt -- Get a backtrace of the current process using dump_stack()</para></listitem>
+  <listitem><para>dmesg -- View the kernel syslog buffer</para></listitem>
+  <listitem><para>go -- Continue the system</para></listitem>
+  </itemizedlist>
+  </para>
+  </listitem>
+  <listitem>
+  <para>When you are done using kdb you need to consider rebooting the
+  system or using the "go" command to resume normal kernel
+  execution.  If you have paused the kernel for a lengthy period of
+  time, applications that rely on timely networking or anything to do
+  with real wall clock time could be adversely affected, so you
+  should take this into consideration when using the kernel
+  debugger.</para>
+  </listitem>
+  </orderedlist></para>
+  </sect1>
+  <sect1 id="quickKDBkeyboard">
+  <title>Quick start for kdb using a keyboard connected console</title>
+  <para>This is a quick example of how to use kdb with a keyboard.</para>
+  <para><orderedlist>
+  <listitem><para>Boot kernel with arguments:
+  <itemizedlist>
+  <listitem><para><constant>kgdboc=kbd</constant></para></listitem>
+  </itemizedlist></para>
+  <para>OR</para>
+  <para>Configure kgdboc after the kernel has booted:
+  <itemizedlist>
+  <listitem><para><constant>echo kbd &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+  </itemizedlist>
+  </para>
+  </listitem>
+  <listitem><para>Enter the kernel debugger manually or by waiting for an oops or fault.  There are several ways you can enter the kernel debugger manually; all involve using sysrq-g, which means you must have enabled CONFIG_MAGIC_SYSRQ=y in your kernel config.</para>
+  <itemizedlist>
+  <listitem><para>When logged in as root or with a super user session you can run:</para>
+   <para><constant>echo g &gt; /proc/sysrq-trigger</constant></para></listitem>
+  <listitem><para>Example using a laptop keyboard</para>
+  <para>Press and hold down: <constant>Alt</constant></para>
+  <para>Press and hold down: <constant>Fn</constant></para>
+  <para>Press and release the key with the label: <constant>SysRq</constant></para>
+  <para>Release: <constant>Fn</constant></para>
+  <para>Press and release: <constant>g</constant></para>
+  <para>Release: <constant>Alt</constant></para>
+  </listitem>
+  <listitem><para>Example using a PS/2 101-key keyboard</para>
+  <para>Press and hold down: <constant>Alt</constant></para>
+  <para>Press and release the key with the label: <constant>SysRq</constant></para>
+  <para>Press and release: <constant>g</constant></para>
+  <para>Release: <constant>Alt</constant></para>
+  </listitem>
+  </itemizedlist>
+  </listitem>
+  <listitem>
+  <para>Now type in a kdb command such as "help", "dmesg", "bt" or "go" to continue kernel execution.</para>
+  </listitem>
+  </orderedlist></para>
+  </sect1>
+  </chapter>
+  <chapter id="EnableKGDB">
+   <title>Using kgdb / gdb</title>
+   <para>In order to use kgdb you must activate it by passing
+   configuration information to one of the kgdb I/O drivers.  If you
+   do not pass any configuration information kgdb will not do anything
+   at all.  Kgdb will only actively hook up to the kernel trap hooks
+   if a kgdb I/O driver is loaded and configured.  If you unconfigure
+   a kgdb I/O driver, kgdb will unregister all the kernel hook points.
+   </para>
+   <para> All kgdb I/O drivers can be reconfigured at run time, if
+   <symbol>CONFIG_SYSFS</symbol> and <symbol>CONFIG_MODULES</symbol>
+   are enabled, by echoing a new config string to
+   <constant>/sys/module/&lt;driver&gt;/parameter/&lt;option&gt;</constant>.
+   The driver can be unconfigured by passing an empty string.  You cannot
+   change the configuration while the debugger is attached.  Make sure
+   to detach the debugger with the <constant>detach</constant> command
+   prior to trying to unconfigure a kgdb I/O driver.
+   </para>
+  <sect1 id="ConnectingGDB">
+  <title>Connecting with gdb to a serial port</title>
+  <orderedlist>
+  <listitem><para>Configure kgdboc</para>
+   <para>Boot kernel with arguments:
+   <itemizedlist>
+    <listitem><para><constant>kgdboc=ttyS0,115200</constant></para></listitem>
+   </itemizedlist></para>
+   <para>OR</para>
+   <para>Configure kgdboc after the kernel has booted:
+   <itemizedlist>
+    <listitem><para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+   </itemizedlist></para>
+  </listitem>
+  <listitem>
+  <para>Stop kernel execution (break into the debugger)</para>
+  <para>In order to connect gdb via kgdboc, the kernel must
+  first be stopped.  There are several ways to stop the kernel which
+  include using kgdbwait as a boot argument, via a sysrq-g, or running
+  the kernel until it takes an exception where it waits for the
+  debugger to attach.
+  <itemizedlist>
+  <listitem><para>When logged in as root or with a super user session you can run:</para>
+   <para><constant>echo g &gt; /proc/sysrq-trigger</constant></para></listitem>
+  <listitem><para>Example using minicom 2.2</para>
+  <para>Press: <constant>Control-a</constant></para>
+  <para>Press: <constant>f</constant></para>
+  <para>Press: <constant>g</constant></para>
+  </listitem>
+  <listitem><para>When you have telneted to a terminal server that supports sending a remote break</para>
+  <para>Press: <constant>Control-]</constant></para>
+  <para>Type in: <constant>send break</constant></para>
+  <para>Press: <constant>Enter</constant></para>
+  <para>Press: <constant>g</constant></para>
+  </listitem>
+  </itemizedlist>
+  </para>
+  </listitem>
+  <listitem>
+    <para>Connect from gdb</para>
     <para>
-    If you are using kgdboc, you need to have used kgdbwait as a boot
-    argument, issued a sysrq-g, or the system you are going to debug
-    has already taken an exception and is waiting for the debugger to
-    attach before you can connect gdb.
-    </para>
-    <para>
-    If you are not using different kgdb I/O driver other than kgdboc,
-    you should be able to connect and the target will automatically
-    respond.
-    </para>
-    <para>
-    Example (using a serial port):
+    Example (using a directly connected port):
     </para>
     <programlisting>
     % gdb ./vmlinux
@@ -266,7 +499,7 @@
     (gdb) target remote /dev/ttyS0
     </programlisting>
     <para>
-    Example (kgdb to a terminal server on tcp port 2012):
+    Example (kgdb to a terminal server on TCP port 2012):
     </para>
     <programlisting>
     % gdb ./vmlinux
@@ -283,6 +516,83 @@
     communications.  You do this prior to issuing the <constant>target
     remote</constant> command by typing in: <constant>set debug remote 1</constant>
     </para>
+  </listitem>
+  </orderedlist>
+  <para>Remember, if you continue in gdb and need to "break in" again,
+  you need to issue another sysrq-g.  It is easy to create a simple
+  entry point by putting a breakpoint at <constant>sys_sync</constant>
+  and then you can run "sync" from a shell or script to break into the
+  debugger.</para>
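+  <para>A minimal sketch of that workflow (assuming your vmlinux was
+  built with debug info so the symbol resolves):
+  </para>
+  <programlisting>
+  (gdb) break sys_sync
+  (gdb) continue
+  </programlisting>
+  <para>Then run <constant>sync</constant> from a shell or script on the
+  target to drop back into the debugger at the breakpoint.</para>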
+  </sect1>
+  </chapter>
+  <chapter id="switchKdbKgdb">
+  <title>kgdb and kdb interoperability</title>
+  <para>It is possible to transition between kdb and kgdb dynamically.
+  The debug core will remember which you used the last time and
+  automatically start in the same mode.</para>
+  <sect1>
+  <title>Switching between kdb and kgdb</title>
+  <sect2>
+  <title>Switching from kgdb to kdb</title>
+  <para>
+  There are two ways to switch from kgdb to kdb: you can use gdb to
+  issue a maintenance packet, or you can blindly type the command $3#33.
+  Whenever the kernel debugger stops in kgdb mode it will print the
+  message <constant>KGDB or $3#33 for KDB</constant>.  It is important
+  to note that you have to type the sequence correctly in one pass.
+  You cannot type a backspace or delete because kgdb will interpret
+  that as part of the debug stream.
+  <orderedlist>
+  <listitem><para>Change from kgdb to kdb by blindly typing:</para>
+  <para><constant>$3#33</constant></para></listitem>
+  <listitem><para>Change from kgdb to kdb with gdb</para>
+  <para><constant>maintenance packet 3</constant></para>
+  <para>NOTE: Now you must kill gdb. Typically you press control-z and
+  issue the command: kill -9 %</para></listitem>
+  </orderedlist>
+  </para>
+  </sect2>
+  <sect2>
+  <title>Switching from kdb to kgdb</title>
+  <para>There are two ways you can change from kdb to kgdb.  You can
+  manually enter kgdb mode by issuing the kgdb command from the kdb
+  shell prompt, or you can connect gdb while the kdb shell prompt is
+  active.  The kdb shell looks for the typical first commands that gdb
+  would issue with the gdb remote protocol and if it sees one of those
+  commands it automatically changes into kgdb mode.</para>
+  <orderedlist>
+  <listitem><para>From kdb issue the command:</para>
+  <para><constant>kgdb</constant></para>
+  <para>Now disconnect your terminal program and connect gdb in its place</para></listitem>
+  <listitem><para>At the kdb prompt, disconnect the terminal program and connect gdb in its place.</para></listitem>
+  </orderedlist>
+  </sect2>
+  </sect1>
+  <sect1>
+  <title>Running kdb commands from gdb</title>
+  <para>It is possible to run a limited set of kdb commands from gdb,
+  using the gdb monitor command.  You don't want to execute any of the
+  run control or breakpoint operations, because they can disrupt the
+  state of the kernel debugger.  You should be using gdb for
+  breakpoints and run control operations if you have gdb connected.
+  The more useful commands to run are things like lsmod, dmesg, ps or
+  possibly some of the memory information commands.  To see all the kdb
+  commands you can run <constant>monitor help</constant>.</para>
+  <para>Example:
+  <informalexample><programlisting>
+(gdb) monitor ps
+1 idle process (state I) and
+27 sleeping system daemon (state M) processes suppressed,
+use 'ps A' to see all.
+Task Addr       Pid   Parent [*] cpu State Thread     Command
+
+0xc78291d0        1        0  0    0   S  0xc7829404  init
+0xc7954150      942        1  0    0   S  0xc7954384  dropbear
+0xc78789c0      944        1  0    0   S  0xc7878bf4  sh
+(gdb)
+  </programlisting></informalexample>
+  </para>
+  </sect1>
   </chapter>
   <chapter id="KGDBTestSuite">
     <title>kgdb Test Suite</title>
@@ -309,34 +619,36 @@
     </para>
   </chapter>
   <chapter id="CommonBackEndReq">
-  <title>KGDB Internals</title>
+  <title>Kernel Debugger Internals</title>
   <sect1 id="kgdbArchitecture">
     <title>Architecture Specifics</title>
       <para>
-      Kgdb is organized into three basic components:
+      The kernel debugger is organized into a number of components:
       <orderedlist>
-      <listitem><para>kgdb core</para>
+      <listitem><para>The debug core</para>
       <para>
-      The kgdb core is found in kernel/kgdb.c.  It contains:
+      The debug core is found in kernel/debug/debug_core.c.  It contains:
       <itemizedlist>
-      <listitem><para>All the logic to implement the gdb serial protocol</para></listitem>
-      <listitem><para>A generic OS exception handler which includes sync'ing the processors into a stopped state on an multi cpu system.</para></listitem>
+      <listitem><para>A generic OS exception handler which includes
+      sync'ing the processors into a stopped state on a multi-CPU
+      system.</para></listitem>
       <listitem><para>The API to talk to the kgdb I/O drivers</para></listitem>
-      <listitem><para>The API to make calls to the arch specific kgdb implementation</para></listitem>
+      <listitem><para>The API to make calls to the arch-specific kgdb implementation</para></listitem>
       <listitem><para>The logic to perform safe memory reads and writes to memory while using the debugger</para></listitem>
       <listitem><para>A full implementation for software breakpoints unless overridden by the arch</para></listitem>
+      <listitem><para>The API to invoke either the kdb or kgdb frontend to the debug core.</para></listitem>
       </itemizedlist>
       </para>
       </listitem>
-      <listitem><para>kgdb arch specific implementation</para>
+      <listitem><para>kgdb arch-specific implementation</para>
       <para>
       This implementation is generally found in arch/*/kernel/kgdb.c.
       As an example, arch/x86/kernel/kgdb.c contains the specifics to
       implement HW breakpoint as well as the initialization to
       dynamically register and unregister for the trap handlers on
-      this architecture.  The arch specific portion implements:
+      this architecture.  The arch-specific portion implements:
       <itemizedlist>
-      <listitem><para>contains an arch specific trap catcher which
+      <listitem><para>contains an arch-specific trap catcher which
       invokes kgdb_handle_exception() to start kgdb about doing its
       work</para></listitem>
       <listitem><para>translation to and from gdb specific packet format to pt_regs</para></listitem>
@@ -347,11 +659,35 @@
       </itemizedlist>
       </para>
       </listitem>
+      <listitem><para>gdbstub frontend (aka kgdb)</para>
+      <para>The gdbstub is located in kernel/debug/gdbstub.c. It contains:</para>
+      <itemizedlist>
+        <listitem><para>All the logic to implement the gdb serial protocol</para></listitem>
+      </itemizedlist>
+      </listitem>
+      <listitem><para>kdb frontend</para>
+      <para>The kdb debugger shell is broken down into a number of
+      components.  The kdb core is located in kernel/debug/kdb.  There
+      are a number of helper functions in some of the other kernel
+      components to make it possible for kdb to examine and report
+      information about the kernel without taking locks that could
+      cause a kernel deadlock.  The kdb core implements the following functionality.</para>
+      <itemizedlist>
+        <listitem><para>A simple shell</para></listitem>
+        <listitem><para>The kdb core command set</para></listitem>
+        <listitem><para>A registration API to register additional kdb shell commands.</para>
+        <para>A good example of a self-contained kdb module is the "ftdump" command for dumping the ftrace buffer.  See: kernel/trace/trace_kdb.c</para></listitem>
+        <listitem><para>The implementation for kdb_printf() which
+        emits messages directly to I/O drivers, bypassing the kernel
+        log.</para></listitem>
+        <listitem><para>SW / HW breakpoint management for the kdb shell</para></listitem>
+      </itemizedlist>
+      </listitem>
       <listitem><para>kgdb I/O driver</para>
       <para>
-      Each kgdb I/O driver has to provide an implemenation for the following:
+      Each kgdb I/O driver has to provide an implementation for the following:
       <itemizedlist>
-      <listitem><para>configuration via builtin or module</para></listitem>
+      <listitem><para>configuration via built-in or module</para></listitem>
       <listitem><para>dynamic configuration and kgdb hook registration calls</para></listitem>
       <listitem><para>read and write character interface</para></listitem>
       <listitem><para>A cleanup handler for unconfiguring from the kgdb core</para></listitem>
@@ -416,15 +752,15 @@
   underlying low level to the hardware driver having "polling hooks"
   which the to which the tty driver is attached.  In the initial
   implementation of kgdboc it the serial_core was changed to expose a
-  low level uart hook for doing polled mode reading and writing of a
+  low level UART hook for doing polled mode reading and writing of a
   single character while in an atomic context.  When kgdb makes an I/O
   request to the debugger, kgdboc invokes a call back in the serial
-  core which in turn uses the call back in the uart driver.  It is
-  certainly possible to extend kgdboc to work with non-uart based
+  core which in turn uses the call back in the UART driver.  It is
+  certainly possible to extend kgdboc to work with non-UART based
   consoles in the future.
   </para>
   <para>
-  When using kgdboc with a uart, the uart driver must implement two callbacks in the <constant>struct uart_ops</constant>. Example from drivers/8250.c:<programlisting>
+  When using kgdboc with a UART, the UART driver must implement two callbacks in the <constant>struct uart_ops</constant>. Example from drivers/8250.c:<programlisting>
 #ifdef CONFIG_CONSOLE_POLL
 	.poll_get_char = serial8250_get_poll_char,
 	.poll_put_char = serial8250_put_poll_char,
@@ -434,7 +770,7 @@
   <constant>#ifdef CONFIG_CONSOLE_POLL</constant>, as shown above.
   Keep in mind that polling hooks have to be implemented in such a way
   that they can be called from an atomic context and have to restore
-  the state of the uart chip on return such that the system can return
+  the state of the UART chip on return such that the system can return
   to normal when the debugger detaches.  You need to be very careful
   with any kind of lock you consider, because failing here is most
   going to mean pressing the reset button.
@@ -453,6 +789,10 @@
 		<itemizedlist>
 		<listitem><para>Jason Wessel<email>jason.wessel@windriver.com</email></para></listitem>
 		</itemizedlist>
+                In Jan 2010 this document was updated to include kdb.
+		<itemizedlist>
+		<listitem><para>Jason Wessel<email>jason.wessel@windriver.com</email></para></listitem>
+		</itemizedlist>
 	</para>
   </chapter>
 </book>
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index 630879c..48e0b21 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -17,6 +17,9 @@
 You can do a very simple testing of running two dd threads in two different
 cgroups. Here is what you can do.
 
+- Enable Block IO controller
+	CONFIG_BLK_CGROUP=y
+
 - Enable group scheduling in CFQ
 	CONFIG_CFQ_GROUP_IOSCHED=y
 
@@ -54,32 +57,52 @@
 
 Various user visible config options
 ===================================
+CONFIG_BLK_CGROUP
+	- Block IO controller.
+
+CONFIG_DEBUG_BLK_CGROUP
+	- Debug help. Right now some additional stats file show up in cgroup
+	  if this option is enabled.
+
 CONFIG_CFQ_GROUP_IOSCHED
 	- Enables group scheduling in CFQ. Currently only 1 level of group
 	  creation is allowed.
 
-CONFIG_DEBUG_CFQ_IOSCHED
-	- Enables some debugging messages in blktrace. Also creates extra
-	  cgroup file blkio.dequeue.
-
-Config options selected automatically
-=====================================
-These config options are not user visible and are selected/deselected
-automatically based on IO scheduler configuration.
-
-CONFIG_BLK_CGROUP
-	- Block IO controller. Selected by CONFIG_CFQ_GROUP_IOSCHED.
-
-CONFIG_DEBUG_BLK_CGROUP
-	- Debug help. Selected by CONFIG_DEBUG_CFQ_IOSCHED.
-
 Details of cgroup files
 =======================
 - blkio.weight
-	- Specifies per cgroup weight.
-
+	- Specifies per cgroup weight. This is the default weight of the group
+	  on all the devices until and unless overridden by a per-device rule.
+	  (See blkio.weight_device).
 	  Currently allowed range of weights is from 100 to 1000.
 
+- blkio.weight_device
+	- One can specify per cgroup per device rules using this interface.
+	  These rules override the default value of group weight as specified
+	  by blkio.weight.
+
+	  Following is the format.
+
+	  # echo dev_maj:dev_minor weight > /path/to/cgroup/blkio.weight_device
+	  Configure weight=300 on /dev/sdb (8:16) in this cgroup
+	  # echo 8:16 300 > blkio.weight_device
+	  # cat blkio.weight_device
+	  dev     weight
+	  8:16    300
+
+	  Configure weight=500 on /dev/sda (8:0) in this cgroup
+	  # echo 8:0 500 > blkio.weight_device
+	  # cat blkio.weight_device
+	  dev     weight
+	  8:0     500
+	  8:16    300
+
+	  Remove specific weight for /dev/sda in this cgroup
+	  # echo 8:0 0 > blkio.weight_device
+	  # cat blkio.weight_device
+	  dev     weight
+	  8:16    300
+
 - blkio.time
 	- disk time allocated to cgroup per device in milliseconds. First
 	  two fields specify the major and minor number of the device and
@@ -92,13 +115,105 @@
 	  third field specifies the number of sectors transferred by the
 	  group to/from the device.
 
+- blkio.io_service_bytes
+	- Number of bytes transferred to/from the disk by the group. These
+	  are further divided by the type of operation - read or write, sync
+	  or async. First two fields specify the major and minor number of the
+	  device, third field specifies the operation type and the fourth field
+	  specifies the number of bytes.
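+
+	  An illustrative (not exhaustive) example with made-up values:
+	  # cat blkio.io_service_bytes
+	  8:16 Read 122880
+	  8:16 Write 4096
+	  8:16 Sync 126976
+	  8:16 Async 0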
+
+- blkio.io_serviced
+	- Number of IOs completed to/from the disk by the group. These
+	  are further divided by the type of operation - read or write, sync
+	  or async. First two fields specify the major and minor number of the
+	  device, third field specifies the operation type and the fourth field
+	  specifies the number of IOs.
+
+- blkio.io_service_time
+	- Total amount of time between request dispatch and request completion
+	  for the IOs done by this cgroup. This is in nanoseconds to make it
+	  meaningful for flash devices too. For devices with queue depth of 1,
+	  this time represents the actual service time. When queue_depth > 1,
+	  that is no longer true as requests may be served out of order. This
+	  may cause the service time for a given IO to include the service time
+	  of multiple IOs when served out of order which may result in total
+	  io_service_time > actual time elapsed. This time is further divided by
+	  the type of operation - read or write, sync or async. First two fields
+	  specify the major and minor number of the device, third field
+	  specifies the operation type and the fourth field specifies the
+	  io_service_time in ns.
+
+- blkio.io_wait_time
+	- Total amount of time the IOs for this cgroup spent waiting in the
+	  scheduler queues for service. This can be greater than the total time
+	  elapsed since it is cumulative io_wait_time for all IOs. It is not a
+	  measure of total time the cgroup spent waiting but rather a measure of
+	  the wait_time for its individual IOs. For devices with queue_depth > 1
+	  this metric does not include the time spent waiting for service once
+	  the IO is dispatched to the device until it actually gets serviced
+	  (there might be a time lag here due to re-ordering of requests by the
+	  device). This is in nanoseconds to make it meaningful for flash
+	  devices too. This time is further divided by the type of operation -
+	  read or write, sync or async. First two fields specify the major and
+	  minor number of the device, third field specifies the operation type
+	  and the fourth field specifies the io_wait_time in ns.
+
+- blkio.io_merged
+	- Total number of bios/requests merged into requests belonging to this
+	  cgroup. This is further divided by the type of operation - read or
+	  write, sync or async.
+
+- blkio.io_queued
+	- Total number of requests queued up at any given instant for this
+	  cgroup. This is further divided by the type of operation - read or
+	  write, sync or async.
+
+- blkio.avg_queue_size
+	- Debugging aid only enabled if CONFIG_DEBUG_BLK_CGROUP=y.
+	  The average queue size for this cgroup over the entire time of this
+	  cgroup's existence. Queue size samples are taken each time one of the
+	  queues of this cgroup gets a timeslice.
+
+- blkio.group_wait_time
+	- Debugging aid only enabled if CONFIG_DEBUG_BLK_CGROUP=y.
+	  This is the amount of time the cgroup had to wait since it became busy
+	  (i.e., went from 0 to 1 request queued) to get a timeslice for one of
+	  its queues. This is different from the io_wait_time which is the
+	  cumulative total of the amount of time spent by each IO in that cgroup
+	  waiting in the scheduler queue. This is in nanoseconds. If this is
+	  read when the cgroup is in a waiting (for timeslice) state, the stat
+	  will only report the group_wait_time accumulated till the last time it
+	  got a timeslice and will not include the current delta.
+
+- blkio.empty_time
+	- Debugging aid only enabled if CONFIG_DEBUG_BLK_CGROUP=y.
+	  This is the amount of time a cgroup spends without any pending
+	  requests when not being served, i.e., it does not include any time
+	  spent idling for one of the queues of the cgroup. This is in
+	  nanoseconds. If this is read when the cgroup is in an empty state,
+	  the stat will only report the empty_time accumulated till the last
+	  time it had a pending request and will not include the current delta.
+
+- blkio.idle_time
+	- Debugging aid only enabled if CONFIG_DEBUG_BLK_CGROUP=y.
+	  This is the amount of time spent by the IO scheduler idling for a
+	  given cgroup in anticipation of a better request than the existing ones
+	  from other queues/cgroups. This is in nanoseconds. If this is read
+	  when the cgroup is in an idling state, the stat will only report the
+	  idle_time accumulated till the last idle period and will not include
+	  the current delta.
+
 - blkio.dequeue
-	- Debugging aid only enabled if CONFIG_DEBUG_CFQ_IOSCHED=y. This
+	- Debugging aid only enabled if CONFIG_DEBUG_BLK_CGROUP=y. This
 	  gives the statistics about how many a times a group was dequeued
 	  from service tree of the device. First two fields specify the major
 	  and minor number of the device and third field specifies the number
 	  of times a group was dequeued from a particular device.
 
+- blkio.reset_stats
+	- Writing an int to this file will result in resetting all the stats
+	  for that cgroup.
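+
+	  Example, from inside the cgroup's directory:
+	  # echo 1 > blkio.reset_stats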
+
 CFQ sysfs tunable
 =================
 /sys/block/<disk>/queue/iosched/group_isolation
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index 867c5b50..272f80d 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -59,8 +59,19 @@
 			Setting it to very large values will improve
 			performance.
 
-barrier=1		This enables/disables barriers.  barrier=0 disables
-			it, barrier=1 enables it.
+barrier=<0(*)|1>	This enables/disables the use of write barriers in
+barrier			the jbd code.  barrier=0 disables, barrier=1 enables.
+nobarrier	(*)	This also requires an IO stack which can support
+			barriers, and if jbd gets an error on a barrier
+			write, it will disable again with a warning.
+			Write barriers enforce proper on-disk ordering
+			of journal commits, making volatile disk write caches
+			safe to use, at some performance penalty.  If
+			your disks are battery-backed in one way or another,
+			disabling barriers may safely improve performance.
+			The mount options "barrier" and "nobarrier" can
+			also be used to enable or disable barriers, for
+			consistency with other ext3 mount options.
 
 orlov		(*)	This enables the new Orlov block allocator. It is
 			enabled by default.
diff --git a/Documentation/filesystems/sysfs-tagging.txt b/Documentation/filesystems/sysfs-tagging.txt
new file mode 100644
index 0000000..caaaf12
--- /dev/null
+++ b/Documentation/filesystems/sysfs-tagging.txt
@@ -0,0 +1,42 @@
+Sysfs tagging
+-------------
+
+(Taken almost verbatim from Eric Biederman's netns tagging patch
+commit msg)
+
+The problem.  Network devices show up in sysfs and with the network
+namespace active multiple devices with the same name can show up in
+the same directory, ouch!
+
+To avoid that problem and allow existing applications in network
+namespaces to see the same interface that is currently presented in
+sysfs, sysfs now has tagging directory support.
+
+By using the network namespace pointers as tags to separate out the
+sysfs directory entries we ensure that we don't have conflicts
+in the directories and applications only see a limited set of
+the network devices.
+
+Each sysfs directory entry may be tagged with zero or one
+namespaces.  A sysfs_dirent is augmented with a void *s_ns.  If a
+directory entry is tagged, then sysfs_dirent->s_flags will have a
+flag between KOBJ_NS_TYPE_NONE and KOBJ_NS_TYPES, and s_ns will
+point to the namespace to which it belongs.
+
+Each sysfs superblock's sysfs_super_info contains an array void
+*ns[KOBJ_NS_TYPES].  When a task in a tagging namespace
+kobj_nstype first mounts sysfs, a new superblock is created.  It
+will be differentiated from other sysfs mounts by having its
+s_fs_info->ns[kobj_nstype] set to the new namespace.  Note that
+through bind mounting and mounts propagation, a task can easily view
+the contents of other namespaces' sysfs mounts.  Therefore, when a
+namespace exits, it will call kobj_ns_exit() to invalidate any
+sysfs_dirent->s_ns pointers pointing to it.
+
+Users of this interface:
+- define a type in the kobj_ns_type enumeration.
+- call kobj_ns_type_register() with its kobj_ns_type_operations which has
+  - current_ns() which returns current's namespace
+  - netlink_ns() which returns a socket's namespace
+  - initial_ns() which returns the initial namespace
+- call kobj_ns_exit() when an individual tag is no longer valid
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index e1bb5b2..e307914 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -27,7 +27,13 @@
 Module Parameters
 -----------------
 
-None.
+* disable_features (bit vector)
+Disable selected features normally supported by the device. This makes it
+possible to work around driver or hardware bugs if the feature in
+question doesn't work as intended for whatever reason. Bit values:
+  1  disable SMBus PEC
+  2  disable the block buffer
+  8  disable the I2C block read functionality
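+
+For example, to disable only the block buffer you could load the driver
+with "modprobe i2c-i801 disable_features=2", or make the setting
+persistent with an "options i2c-i801 disable_features=2" line in a file
+under /etc/modprobe.d/ (exact file name and policy depend on your
+distribution).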
 
 
 Description
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b12bacd..f5fce48 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -58,6 +58,7 @@
 	ISAPNP	ISA PnP code is enabled.
 	ISDN	Appropriate ISDN support is enabled.
 	JOY	Appropriate joystick support is enabled.
+	KGDB	Kernel debugger support is enabled.
 	KVM	Kernel Virtual Machine support is enabled.
 	LIBATA  Libata driver is enabled
 	LP	Printer support is enabled.
@@ -712,6 +713,12 @@
 			The VGA output is eventually overwritten by the real
 			console.
 
+	ekgdboc=	[X86,KGDB] Allow early kernel console debugging
+			ekgdboc=kbd
+
+			This is designed to be used in conjunction with
+			the boot argument: earlyprintk=vga
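+
+			For example, combining the two arguments named
+			above: earlyprintk=vga ekgdboc=kbd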
+
 	eata=		[HW,SCSI]
 
 	edd=		[EDD]
@@ -1120,10 +1127,26 @@
 			use the HighMem zone if it exists, and the Normal
 			zone if it does not.
 
-	kgdboc=		[HW] kgdb over consoles.
-			Requires a tty driver that supports console polling.
-			(only serial supported for now)
-			Format: <serial_device>[,baud]
+	kgdbdbgp=	[KGDB,HW] kgdb over EHCI usb debug port.
+			Format: <Controller#>[,poll interval]
+			The controller # is the number of the ehci usb debug
+			port as it is probed via PCI.  The poll interval is
+			optional and is the number of seconds between
+			each poll cycle to the debug port in case you need
+			the functionality for interrupting the kernel with
+			gdb or control-c on the dbgp connection.  When
+			not using this parameter you use sysrq-g to break into
+			the kernel debugger.
+
+	kgdboc=		[KGDB,HW] kgdb over consoles.
+			Requires a tty driver that supports console polling,
+			or a supported polling keyboard driver (non-usb).
+			Serial only format: <serial_device>[,baud]
+			keyboard only format: kbd
+			keyboard and serial format: kbd,<serial_device>[,baud]
+
+	kgdbwait	[KGDB] Stop kernel execution and enter the
+			kernel debugger at the earliest opportunity.
 
 	kmac=		[MIPS] korina ethernet MAC address.
 			Configure the RouterBoard 532 series on-chip
diff --git a/Documentation/powerpc/dts-bindings/4xx/reboot.txt b/Documentation/powerpc/dts-bindings/4xx/reboot.txt
new file mode 100644
index 0000000..d721726
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/4xx/reboot.txt
@@ -0,0 +1,18 @@
+Reboot property to control system reboot on PPC4xx systems:
+
+By setting "reset_type" to one of the following values, the default
+software reset mechanism may be overridden. Here are the possible values
+of "reset_type":
+
+      1 - PPC4xx core reset
+      2 - PPC4xx chip reset
+      3 - PPC4xx system reset (default)
+
+Example:
+
+		cpu@0 {
+			device_type = "cpu";
+			model = "PowerPC,440SPe";
+			...
+			reset-type = <2>;	/* Use chip-reset */
+		};
diff --git a/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt b/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt
index d015dce..b0019eb 100644
--- a/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt
@@ -11,7 +11,7 @@
   83xx, "fsl,mpc8572-gpio" for 85xx and "fsl,mpc8610-gpio" for 86xx.
 - #gpio-cells : Should be two. The first cell is the pin number and the
   second cell is used to specify optional parameters (currently unused).
- - interrupts : Interrupt mapping for GPIO IRQ (currently unused).
+ - interrupts : Interrupt mapping for GPIO IRQ.
  - interrupt-parent : Phandle for the interrupt controller that
    services interrupts for this device.
 - gpio-controller : Marks the port as GPIO controller.
@@ -38,3 +38,23 @@
 
 See booting-without-of.txt for details of how to specify GPIO
 information for devices.
+
+To use GPIO pins as interrupt sources for peripherals, specify the
+GPIO controller as the interrupt parent and define GPIO number +
+trigger mode using the interrupts property, which is defined like
+this:
+
+interrupts = <number trigger>, where:
+ - number: GPIO pin (0..31)
+ - trigger: trigger mode:
+	2 = trigger on falling edge
+	3 = trigger on both edges
+
+An example of a device using this is:
+
+	funkyfpga@0 {
+		compatible = "funky-fpga";
+		...
+		interrupts = <4 3>;
+		interrupt-parent = <&gpio1>;
+	};
diff --git a/MAINTAINERS b/MAINTAINERS
index a31a717..a8fe9b4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3319,15 +3319,17 @@
 F:	include/keys/
 F:	security/keys/
 
-KGDB
+KGDB / KDB / debug_core
 M:	Jason Wessel <jason.wessel@windriver.com>
+W:	http://kgdb.wiki.kernel.org/
 L:	kgdb-bugreport@lists.sourceforge.net
 S:	Maintained
 F:	Documentation/DocBook/kgdb.tmpl
 F:	drivers/misc/kgdbts.c
 F:	drivers/serial/kgdboc.c
+F:	include/linux/kdb.h
 F:	include/linux/kgdb.h
-F:	kernel/kgdb.c
+F:	kernel/debug/
 
 KMEMCHECK
 M:	Vegard Nossum <vegardno@ifi.uio.no>
diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c
index d979e7c..a5fffc8 100644
--- a/arch/alpha/kernel/pci-sysfs.c
+++ b/arch/alpha/kernel/pci-sysfs.c
@@ -53,6 +53,7 @@
 
 /**
  * pci_mmap_resource - map a PCI resource into user memory space
+ * @filp: open sysfs file
  * @kobj: kobject for mapping
  * @attr: struct bin_attribute for the file being mapped
  * @vma: struct vm_area_struct passed into the mmap
@@ -60,7 +61,8 @@
  *
  * Use the bus mapping routines to map a PCI resource into userspace.
  */
-static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
+static int pci_mmap_resource(struct file *filp, struct kobject *kobj,
+			     struct bin_attribute *attr,
 			     struct vm_area_struct *vma, int sparse)
 {
 	struct pci_dev *pdev = to_pci_dev(container_of(kobj,
@@ -89,14 +91,14 @@
 	return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse);
 }
 
-static int pci_mmap_resource_sparse(struct kobject *kobj,
+static int pci_mmap_resource_sparse(struct file *filp, struct kobject *kobj,
 				    struct bin_attribute *attr,
 				    struct vm_area_struct *vma)
 {
 	return pci_mmap_resource(kobj, attr, vma, 1);
 }
 
-static int pci_mmap_resource_dense(struct kobject *kobj,
+static int pci_mmap_resource_dense(struct file *filp, struct kobject *kobj,
 				   struct bin_attribute *attr,
 				   struct vm_area_struct *vma)
 {
diff --git a/arch/arm/configs/am3517_evm_defconfig b/arch/arm/configs/am3517_evm_defconfig
index 66a10b5..e4f4fb5 100644
--- a/arch/arm/configs/am3517_evm_defconfig
+++ b/arch/arm/configs/am3517_evm_defconfig
@@ -422,15 +422,29 @@
 #
 # CONFIG_NET_PKTGEN is not set
 # CONFIG_HAMRADIO is not set
-# CONFIG_CAN is not set
+CONFIG_CAN=y
+CONFIG_CAN_RAW=y
+CONFIG_CAN_BCM=y
+
+#
+# CAN Device Drivers
+#
+CONFIG_CAN_VCAN=y
+CONFIG_CAN_DEV=y
+CONFIG_CAN_CALC_BITTIMING=y
+CONFIG_CAN_TI_HECC=y
+# CONFIG_CAN_SJA1000 is not set
+
+#
+# CAN USB interfaces
+#
+# CONFIG_CAN_EMS_USB is not set
+CONFIG_CAN_DEBUG_DEVICES=y
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
 CONFIG_WIRELESS=y
 # CONFIG_CFG80211 is not set
-CONFIG_CFG80211_DEFAULT_PS_VALUE=0
-# CONFIG_WIRELESS_OLD_REGULATORY is not set
-# CONFIG_WIRELESS_EXT is not set
 # CONFIG_LIB80211 is not set
 
 #
@@ -517,7 +531,75 @@
 # CONFIG_SCSI_OSD_INITIATOR is not set
 # CONFIG_ATA is not set
 # CONFIG_MD is not set
-# CONFIG_NETDEVICES is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+CONFIG_TI_DAVINCI_EMAC=y
+# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
 # CONFIG_ISDN is not set
 # CONFIG_PHONE is not set
 
@@ -692,7 +774,57 @@
 #
 # CONFIG_VGASTATE is not set
 # CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_ARMCLCD is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=4
+CONFIG_OMAP2_DSS_DEBUG_SUPPORT=y
+# CONFIG_OMAP2_DSS_RFBI is not set
+CONFIG_OMAP2_DSS_VENC=y
+# CONFIG_OMAP2_DSS_SDI is not set
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+# CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE is not set
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+CONFIG_PANEL_GENERIC=y
+# CONFIG_PANEL_SHARP_LS037V7DW01 is not set
+CONFIG_PANEL_SHARP_LQ043T1DG01=y
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
diff --git a/arch/arm/configs/ams_delta_defconfig b/arch/arm/configs/ams_delta_defconfig
index 3b3a3775..6d8a0c8 100644
--- a/arch/arm/configs/ams_delta_defconfig
+++ b/arch/arm/configs/ams_delta_defconfig
@@ -47,6 +47,7 @@
 # CONFIG_TASKSTATS is not set
 # CONFIG_UTS_NS is not set
 # CONFIG_AUDIT is not set
+CONFIG_TREE_PREEMPT_RCU=y
 # CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED=y
@@ -95,9 +96,8 @@
 # Block layer
 #
 CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+# CONFIG_LBDAF is not set
 # CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
 
 #
 # IO Schedulers
@@ -699,6 +699,7 @@
 CONFIG_SERIO_SERPORT=y
 CONFIG_SERIO_LIBPS2=y
 # CONFIG_SERIO_RAW is not set
+CONFIG_SERIO_AMS_DELTA=y
 # CONFIG_GAMEPORT is not set
 
 #
@@ -835,7 +836,8 @@
 #
 # Graphics support
 #
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
 
 #
 # Display device support
@@ -1283,7 +1285,7 @@
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
 # CONFIG_DEBUG_KOBJECT is not set
-CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
 # CONFIG_DEBUG_INFO is not set
 # CONFIG_DEBUG_VM is not set
 # CONFIG_DEBUG_LIST is not set
diff --git a/arch/arm/configs/devkit8000_defconfig b/arch/arm/configs/devkit8000_defconfig
index 61a817e..c7a6820 100644
--- a/arch/arm/configs/devkit8000_defconfig
+++ b/arch/arm/configs/devkit8000_defconfig
@@ -1,13 +1,14 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc6
-# Thu Feb  4 15:42:56 2010
+# Linux kernel version: 2.6.34-rc2
+# Wed Mar 24 13:27:25 2010
 #
 CONFIG_ARM=y
 CONFIG_SYS_SUPPORTS_APM_EMULATION=y
 CONFIG_GENERIC_GPIO=y
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_HAVE_PROC_CPU=y
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_STACKTRACE_SUPPORT=y
 CONFIG_HAVE_LATENCYTOP_SUPPORT=y
@@ -19,7 +20,9 @@
 CONFIG_ARCH_HAS_CPUFREQ=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_ARM_L1_CACHE_SHIFT_6=y
 CONFIG_VECTORS_BASE=0xffff0000
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 CONFIG_CONSTRUCTORS=y
@@ -60,11 +63,6 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
 # CONFIG_CGROUPS is not set
 # CONFIG_SYSFS_DEPRECATED_V2 is not set
 # CONFIG_RELAY is not set
@@ -96,10 +94,14 @@
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
 CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
 
 #
 # Kernel Performance Events And Counters
 #
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_COMPAT_BRK=y
 CONFIG_SLAB=y
@@ -170,7 +172,7 @@
 CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
 # CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
 # CONFIG_MUTEX_SPIN_ON_OWNER is not set
-# CONFIG_FREEZER is not set
+CONFIG_FREEZER=y
 
 #
 # System Type
@@ -181,6 +183,7 @@
 # CONFIG_ARCH_REALVIEW is not set
 # CONFIG_ARCH_VERSATILE is not set
 # CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
 # CONFIG_ARCH_CLPS711X is not set
 # CONFIG_ARCH_GEMINI is not set
 # CONFIG_ARCH_EBSA110 is not set
@@ -190,7 +193,6 @@
 # CONFIG_ARCH_STMP3XXX is not set
 # CONFIG_ARCH_NETX is not set
 # CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_NOMADIK is not set
 # CONFIG_ARCH_IOP13XX is not set
 # CONFIG_ARCH_IOP32X is not set
 # CONFIG_ARCH_IOP33X is not set
@@ -207,21 +209,26 @@
 # CONFIG_ARCH_KS8695 is not set
 # CONFIG_ARCH_NS9XXX is not set
 # CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
 # CONFIG_ARCH_PNX4008 is not set
 # CONFIG_ARCH_PXA is not set
 # CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
 # CONFIG_ARCH_RPC is not set
 # CONFIG_ARCH_SA1100 is not set
 # CONFIG_ARCH_S3C2410 is not set
 # CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
 # CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5PV210 is not set
 # CONFIG_ARCH_SHARK is not set
 # CONFIG_ARCH_LH7A40X is not set
 # CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
 # CONFIG_ARCH_DAVINCI is not set
 CONFIG_ARCH_OMAP=y
-# CONFIG_ARCH_BCMRING is not set
-# CONFIG_ARCH_U8500 is not set
 
 #
 # TI OMAP Implementations
@@ -237,16 +244,20 @@
 # OMAP Feature Selections
 #
 # CONFIG_OMAP_RESET_CLOCKS is not set
-# CONFIG_OMAP_MUX is not set
+CONFIG_OMAP_MUX=y
+# CONFIG_OMAP_MUX_DEBUG is not set
+CONFIG_OMAP_MUX_WARNINGS=y
 CONFIG_OMAP_MCBSP=y
 # CONFIG_OMAP_MBOX_FWK is not set
 # CONFIG_OMAP_MPU_TIMER is not set
 CONFIG_OMAP_32K_TIMER=y
+# CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE is not set
 CONFIG_OMAP_32K_TIMER_HZ=128
 CONFIG_OMAP_DM_TIMER=y
 # CONFIG_OMAP_PM_NONE is not set
 CONFIG_OMAP_PM_NOOP=y
 CONFIG_ARCH_OMAP3430=y
+CONFIG_OMAP_PACKAGE_CUS=y
 
 #
 # OMAP Board Type
@@ -295,6 +306,7 @@
 # CONFIG_CPU_BPREDICT_DISABLE is not set
 CONFIG_HAS_TLS_REG=y
 CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_CPU_HAS_PMU=y
 # CONFIG_ARM_ERRATA_430973 is not set
 # CONFIG_ARM_ERRATA_458693 is not set
 # CONFIG_ARM_ERRATA_460075 is not set
@@ -387,7 +399,14 @@
 #
 # Power management options
 #
-# CONFIG_PM is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
 CONFIG_ARCH_SUSPEND_POSSIBLE=y
 CONFIG_NET=y
 
@@ -395,7 +414,6 @@
 # Networking options
 #
 CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
 CONFIG_UNIX=y
 CONFIG_XFRM=y
 # CONFIG_XFRM_USER is not set
@@ -666,6 +684,7 @@
 #
 # SCSI device support
 #
+CONFIG_SCSI_MOD=y
 # CONFIG_RAID_ATTRS is not set
 CONFIG_SCSI=y
 CONFIG_SCSI_DMA=y
@@ -717,6 +736,7 @@
 CONFIG_MII=y
 # CONFIG_AX88796 is not set
 # CONFIG_SMC91X is not set
+# CONFIG_TI_DAVINCI_EMAC is not set
 CONFIG_DM9000=y
 CONFIG_DM9000_DEBUGLEVEL=4
 CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL=y
@@ -863,6 +883,7 @@
 # CONFIG_SERIAL_MAX3100 is not set
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
 CONFIG_UNIX98_PTYS=y
 # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 # CONFIG_LEGACY_PTYS is not set
@@ -891,6 +912,7 @@
 # CONFIG_I2C_OCORES is not set
 CONFIG_I2C_OMAP=y
 # CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
 
 #
 # External I2C/SMBus adapter drivers
@@ -904,15 +926,9 @@
 #
 # CONFIG_I2C_PCA_PLATFORM is not set
 # CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
 # CONFIG_I2C_DEBUG_CORE is not set
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
 CONFIG_SPI=y
 # CONFIG_SPI_DEBUG is not set
 CONFIG_SPI_MASTER=y
@@ -944,10 +960,12 @@
 #
 # Memory mapped GPIO expanders:
 #
+# CONFIG_GPIO_IT8761E is not set
 
 #
 # I2C GPIO expanders:
 #
+# CONFIG_GPIO_MAX7300 is not set
 # CONFIG_GPIO_MAX732X is not set
 # CONFIG_GPIO_PCA953X is not set
 # CONFIG_GPIO_PCF857X is not set
@@ -984,10 +1002,12 @@
 # Multifunction device drivers
 #
 CONFIG_MFD_CORE=y
+# CONFIG_MFD_88PM860X is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_MFD_ASIC3 is not set
 # CONFIG_HTC_EGPIO is not set
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
 # CONFIG_TPS65010 is not set
 CONFIG_TWL4030_CORE=y
 CONFIG_TWL4030_POWER=y
@@ -998,22 +1018,25 @@
 # CONFIG_MFD_TC6393XB is not set
 # CONFIG_PMIC_DA903X is not set
 # CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
 # CONFIG_MFD_WM8400 is not set
 # CONFIG_MFD_WM831X is not set
 # CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
 # CONFIG_MFD_PCF50633 is not set
 # CONFIG_MFD_MC13783 is not set
 # CONFIG_AB3100_CORE is not set
 # CONFIG_EZX_PCAP is not set
-# CONFIG_MFD_88PM8607 is not set
 # CONFIG_AB4500_CORE is not set
 CONFIG_REGULATOR=y
 # CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
 # CONFIG_REGULATOR_FIXED_VOLTAGE is not set
 # CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
 # CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
 # CONFIG_REGULATOR_BQ24022 is not set
 # CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
 # CONFIG_REGULATOR_MAX8660 is not set
 CONFIG_REGULATOR_TWL4030=y
 # CONFIG_REGULATOR_LP3971 is not set
@@ -1072,7 +1095,6 @@
 CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
 CONFIG_FB_OMAP2=y
 CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
-# CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE is not set
 CONFIG_FB_OMAP2_NUM_FBS=3
 
 #
@@ -1080,7 +1102,9 @@
 #
 CONFIG_PANEL_GENERIC=y
 # CONFIG_PANEL_SHARP_LS037V7DW01 is not set
-CONFIG_PANEL_INNOLUX_AT070TN83=y
+# CONFIG_PANEL_SHARP_LQ043T1DG01 is not set
+# CONFIG_PANEL_TOPPOLY_TDO35S is not set
+# CONFIG_PANEL_TPO_TD043MTEA1 is not set
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
@@ -1136,6 +1160,7 @@
 CONFIG_SND_SPI=y
 CONFIG_SND_USB=y
 # CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_UA101 is not set
 # CONFIG_SND_USB_CAIAQ is not set
 CONFIG_SND_SOC=y
 CONFIG_SND_OMAP_SOC=y
@@ -1147,42 +1172,44 @@
 # CONFIG_SOUND_PRIME is not set
 CONFIG_HID_SUPPORT=y
 CONFIG_HID=y
-CONFIG_HIDRAW=y
+# CONFIG_HIDRAW is not set
 
 #
 # USB Input Devices
 #
 CONFIG_USB_HID=y
 # CONFIG_HID_PID is not set
-CONFIG_USB_HIDDEV=y
+# CONFIG_USB_HIDDEV is not set
 
 #
 # Special HID drivers
 #
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
+# CONFIG_HID_3M_PCT is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
 # CONFIG_HID_DRAGONRISE is not set
-CONFIG_HID_EZKEY=y
+# CONFIG_HID_EZKEY is not set
 # CONFIG_HID_KYE is not set
-CONFIG_HID_GYRATION=y
+# CONFIG_HID_GYRATION is not set
 # CONFIG_HID_TWINHAN is not set
 # CONFIG_HID_KENSINGTON is not set
-CONFIG_HID_LOGITECH=y
-# CONFIG_LOGITECH_FF is not set
-# CONFIG_LOGIRUMBLEPAD2_FF is not set
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
+# CONFIG_HID_MONTEREY is not set
 # CONFIG_HID_NTRIG is not set
-CONFIG_HID_PANTHERLORD=y
-# CONFIG_PANTHERLORD_FF is not set
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
-CONFIG_HID_SUNPLUS=y
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
+# CONFIG_HID_SUNPLUS is not set
 # CONFIG_HID_GREENASIA is not set
 # CONFIG_HID_SMARTJOYPLUS is not set
 # CONFIG_HID_TOPSEED is not set
@@ -1193,7 +1220,7 @@
 CONFIG_USB_ARCH_HAS_OHCI=y
 CONFIG_USB_ARCH_HAS_EHCI=y
 CONFIG_USB=y
-# CONFIG_USB_DEBUG is not set
+CONFIG_USB_DEBUG=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 
 #
@@ -1202,7 +1229,7 @@
 # CONFIG_USB_DEVICEFS is not set
 # CONFIG_USB_DEVICE_CLASS is not set
 # CONFIG_USB_DYNAMIC_MINORS is not set
-# CONFIG_USB_OTG is not set
+CONFIG_USB_OTG=y
 # CONFIG_USB_OTG_WHITELIST is not set
 # CONFIG_USB_OTG_BLACKLIST_HUB is not set
 CONFIG_USB_MON=y
@@ -1230,15 +1257,15 @@
 #
 # OMAP 343x high speed USB support
 #
-CONFIG_USB_MUSB_HOST=y
+# CONFIG_USB_MUSB_HOST is not set
 # CONFIG_USB_MUSB_PERIPHERAL is not set
-# CONFIG_USB_MUSB_OTG is not set
-# CONFIG_USB_GADGET_MUSB_HDRC is not set
+CONFIG_USB_MUSB_OTG=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
 CONFIG_USB_MUSB_HDRC_HCD=y
 # CONFIG_MUSB_PIO_ONLY is not set
 CONFIG_USB_INVENTRA_DMA=y
 # CONFIG_USB_TI_CPPI_DMA is not set
-# CONFIG_USB_MUSB_DEBUG is not set
+CONFIG_USB_MUSB_DEBUG=y
 
 #
 # USB Device Class drivers
@@ -1291,7 +1318,6 @@
 # CONFIG_USB_RIO500 is not set
 # CONFIG_USB_LEGOTOWER is not set
 # CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
 # CONFIG_USB_LED is not set
 # CONFIG_USB_CYPRESS_CY7C63 is not set
 # CONFIG_USB_CYTHERM is not set
@@ -1304,9 +1330,8 @@
 # CONFIG_USB_IOWARRIOR is not set
 # CONFIG_USB_TEST is not set
 # CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
 CONFIG_USB_GADGET=y
-# CONFIG_USB_GADGET_DEBUG is not set
+CONFIG_USB_GADGET_DEBUG=y
 # CONFIG_USB_GADGET_DEBUG_FILES is not set
 CONFIG_USB_GADGET_VBUS_DRAW=2
 CONFIG_USB_GADGET_SELECTED=y
@@ -1314,8 +1339,7 @@
 # CONFIG_USB_GADGET_ATMEL_USBA is not set
 # CONFIG_USB_GADGET_FSL_USB2 is not set
 # CONFIG_USB_GADGET_LH7A40X is not set
-CONFIG_USB_GADGET_OMAP=y
-CONFIG_USB_OMAP=y
+# CONFIG_USB_GADGET_OMAP is not set
 # CONFIG_USB_GADGET_PXA25X is not set
 # CONFIG_USB_GADGET_R8A66597 is not set
 # CONFIG_USB_GADGET_PXA27X is not set
@@ -1330,19 +1354,20 @@
 # CONFIG_USB_GADGET_GOKU is not set
 # CONFIG_USB_GADGET_LANGWELL is not set
 # CONFIG_USB_GADGET_DUMMY_HCD is not set
-# CONFIG_USB_GADGET_DUALSPEED is not set
+CONFIG_USB_GADGET_DUALSPEED=y
 # CONFIG_USB_ZERO is not set
-CONFIG_USB_AUDIO=m
-CONFIG_USB_ETH=m
-CONFIG_USB_ETH_RNDIS=y
-CONFIG_USB_ETH_EEM=y
-CONFIG_USB_GADGETFS=m
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=y
+# CONFIG_USB_ETH_RNDIS is not set
+# CONFIG_USB_ETH_EEM is not set
+# CONFIG_USB_GADGETFS is not set
 # CONFIG_USB_FILE_STORAGE is not set
 # CONFIG_USB_MASS_STORAGE is not set
-CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_G_SERIAL is not set
 # CONFIG_USB_MIDI_GADGET is not set
-CONFIG_USB_G_PRINTER=m
+# CONFIG_USB_G_PRINTER is not set
 # CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
 # CONFIG_USB_G_MULTI is not set
 
 #
@@ -1373,8 +1398,6 @@
 CONFIG_MMC_SDHCI_PLTFM=m
 # CONFIG_MMC_OMAP is not set
 CONFIG_MMC_OMAP_HS=y
-# CONFIG_MMC_AT91 is not set
-# CONFIG_MMC_ATMELMCI is not set
 CONFIG_MMC_SPI=m
 # CONFIG_MEMSTICK is not set
 CONFIG_NEW_LEDS=y
@@ -1392,11 +1415,11 @@
 # CONFIG_LEDS_REGULATOR is not set
 # CONFIG_LEDS_BD2802 is not set
 # CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
 
 #
 # LED Triggers
 #
-CONFIG_LEDS_TRIGGERS=y
 # CONFIG_LEDS_TRIGGER_TIMER is not set
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 # CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
@@ -1580,6 +1603,7 @@
 CONFIG_UBIFS_FS_LZO=y
 CONFIG_UBIFS_FS_ZLIB=y
 # CONFIG_UBIFS_FS_DEBUG is not set
+# CONFIG_LOGFS is not set
 CONFIG_CRAMFS=y
 # CONFIG_SQUASHFS is not set
 # CONFIG_VXFS_FS is not set
@@ -1606,6 +1630,7 @@
 CONFIG_RPCSEC_GSS_KRB5=y
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 # CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
 # CONFIG_CIFS is not set
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
diff --git a/arch/arm/configs/omap3_defconfig b/arch/arm/configs/omap3_defconfig
index d6ad921..94dfcf0a 100644
--- a/arch/arm/configs/omap3_defconfig
+++ b/arch/arm/configs/omap3_defconfig
@@ -1,13 +1,14 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc5
-# Tue Jan 26 11:05:31 2010
+# Linux kernel version: 2.6.34-rc7
+# Thu May 13 10:54:43 2010
 #
 CONFIG_ARM=y
 CONFIG_SYS_SUPPORTS_APM_EMULATION=y
 CONFIG_GENERIC_GPIO=y
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_HAVE_PROC_CPU=y
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_STACKTRACE_SUPPORT=y
 CONFIG_HAVE_LATENCYTOP_SUPPORT=y
@@ -19,7 +20,9 @@
 CONFIG_ARCH_HAS_CPUFREQ=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_ARM_L1_CACHE_SHIFT_6=y
 CONFIG_OPROFILE_ARMV6=y
 CONFIG_OPROFILE_ARM11_CORE=y
 CONFIG_OPROFILE_ARMV7=y
@@ -63,12 +66,7 @@
 # CONFIG_TREE_RCU_TRACE is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
+CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_CGROUPS is not set
 # CONFIG_SYSFS_DEPRECATED_V2 is not set
 # CONFIG_RELAY is not set
@@ -100,17 +98,21 @@
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
 CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
 
 #
 # Kernel Performance Events And Counters
 #
+CONFIG_PERF_EVENTS=y
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_COMPAT_BRK=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
 CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
 CONFIG_OPROFILE=y
 CONFIG_HAVE_OPROFILE=y
 CONFIG_KPROBES=y
@@ -189,6 +191,7 @@
 # CONFIG_ARCH_REALVIEW is not set
 # CONFIG_ARCH_VERSATILE is not set
 # CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
 # CONFIG_ARCH_CLPS711X is not set
 # CONFIG_ARCH_GEMINI is not set
 # CONFIG_ARCH_EBSA110 is not set
@@ -198,7 +201,6 @@
 # CONFIG_ARCH_STMP3XXX is not set
 # CONFIG_ARCH_NETX is not set
 # CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_NOMADIK is not set
 # CONFIG_ARCH_IOP13XX is not set
 # CONFIG_ARCH_IOP32X is not set
 # CONFIG_ARCH_IOP33X is not set
@@ -215,21 +217,26 @@
 # CONFIG_ARCH_KS8695 is not set
 # CONFIG_ARCH_NS9XXX is not set
 # CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
 # CONFIG_ARCH_PNX4008 is not set
 # CONFIG_ARCH_PXA is not set
 # CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
 # CONFIG_ARCH_RPC is not set
 # CONFIG_ARCH_SA1100 is not set
 # CONFIG_ARCH_S3C2410 is not set
 # CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
 # CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5PV210 is not set
 # CONFIG_ARCH_SHARK is not set
 # CONFIG_ARCH_LH7A40X is not set
 # CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
 # CONFIG_ARCH_DAVINCI is not set
 CONFIG_ARCH_OMAP=y
-# CONFIG_ARCH_BCMRING is not set
-# CONFIG_ARCH_U8500 is not set
 
 #
 # TI OMAP Implementations
@@ -254,6 +261,7 @@
 # CONFIG_OMAP_MBOX_FWK is not set
 # CONFIG_OMAP_MPU_TIMER is not set
 CONFIG_OMAP_32K_TIMER=y
+# CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE is not set
 CONFIG_OMAP_32K_TIMER_HZ=128
 CONFIG_OMAP_DM_TIMER=y
 # CONFIG_OMAP_PM_NONE is not set
@@ -264,7 +272,7 @@
 # OMAP Core Type
 #
 CONFIG_ARCH_OMAP2420=y
-# CONFIG_ARCH_OMAP2430 is not set
+CONFIG_ARCH_OMAP2430=y
 CONFIG_ARCH_OMAP3430=y
 CONFIG_OMAP_PACKAGE_CBB=y
 CONFIG_OMAP_PACKAGE_CUS=y
@@ -276,8 +284,9 @@
 CONFIG_MACH_OMAP2_TUSB6010=y
 CONFIG_MACH_OMAP_H4=y
 CONFIG_MACH_OMAP_APOLLON=y
-# CONFIG_MACH_OMAP_2430SDP is not set
+CONFIG_MACH_OMAP_2430SDP=y
 CONFIG_MACH_OMAP3_BEAGLE=y
+CONFIG_MACH_DEVKIT8000=y
 CONFIG_MACH_OMAP_LDP=y
 CONFIG_MACH_OVERO=y
 CONFIG_MACH_OMAP3EVM=y
@@ -294,6 +303,7 @@
 CONFIG_MACH_OMAP_ZOOM3=y
 CONFIG_MACH_CM_T35=y
 CONFIG_MACH_IGEP0020=y
+CONFIG_MACH_SBC3530=y
 CONFIG_MACH_OMAP_3630SDP=y
 CONFIG_MACH_OMAP_4430SDP=y
 # CONFIG_OMAP3_EMU is not set
@@ -330,11 +340,16 @@
 # CONFIG_CPU_DCACHE_DISABLE is not set
 # CONFIG_CPU_BPREDICT_DISABLE is not set
 CONFIG_HAS_TLS_REG=y
+CONFIG_OUTER_CACHE=y
+CONFIG_OUTER_CACHE_SYNC=y
+CONFIG_CACHE_L2X0=y
 CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_CPU_HAS_PMU=y
 # CONFIG_ARM_ERRATA_411920 is not set
 # CONFIG_ARM_ERRATA_430973 is not set
 # CONFIG_ARM_ERRATA_458693 is not set
 # CONFIG_ARM_ERRATA_460075 is not set
+# CONFIG_PL310_ERRATA_588369 is not set
 CONFIG_ARM_GIC=y
 CONFIG_COMMON_CLKDEV=y
 
@@ -368,6 +383,7 @@
 # CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
 # CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
 # CONFIG_HIGHMEM is not set
+CONFIG_HW_PERF_EVENTS=y
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -390,7 +406,7 @@
 #
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/nfs nfsroot=192.168.0.1:/home/user/buildroot ip=192.168.0.2:192.168.0.1:192.168.0.1:255.255.255.0:tgt:eth0:off rw console=ttyS2,115200n8"
+CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyS2,115200"
 # CONFIG_XIP_KERNEL is not set
 CONFIG_KEXEC=y
 CONFIG_ATAGS_PROC=y
@@ -443,7 +459,8 @@
 #
 CONFIG_PM=y
 CONFIG_PM_DEBUG=y
-CONFIG_PM_VERBOSE=y
+# CONFIG_PM_ADVANCED_DEBUG is not set
+# CONFIG_PM_VERBOSE is not set
 CONFIG_CAN_PM_TRACE=y
 CONFIG_PM_SLEEP=y
 CONFIG_SUSPEND=y
@@ -451,6 +468,7 @@
 CONFIG_SUSPEND_FREEZER=y
 # CONFIG_APM_EMULATION is not set
 CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
 CONFIG_ARCH_SUSPEND_POSSIBLE=y
 CONFIG_NET=y
 
@@ -458,7 +476,6 @@
 # Networking options
 #
 CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
 CONFIG_UNIX=y
 CONFIG_XFRM=y
 CONFIG_XFRM_USER=y
@@ -544,7 +561,6 @@
 #
 # CONFIG_NET_PKTGEN is not set
 # CONFIG_NET_TCPPROBE is not set
-# CONFIG_NET_DROP_MONITOR is not set
 # CONFIG_HAMRADIO is not set
 # CONFIG_CAN is not set
 # CONFIG_IRDA is not set
@@ -584,7 +600,7 @@
 # CONFIG_CFG80211_REG_DEBUG is not set
 CONFIG_CFG80211_DEFAULT_PS=y
 # CONFIG_CFG80211_DEBUGFS is not set
-CONFIG_WIRELESS_OLD_REGULATORY=y
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
 CONFIG_CFG80211_WEXT=y
 CONFIG_WIRELESS_EXT_SYSFS=y
 CONFIG_LIB80211=y
@@ -676,7 +692,6 @@
 # CONFIG_MTD_COMPLEX_MAPPINGS is not set
 # CONFIG_MTD_PHYSMAP is not set
 # CONFIG_MTD_ARM_INTEGRATOR is not set
-CONFIG_MTD_OMAP_NOR=y
 # CONFIG_MTD_PLATRAM is not set
 
 #
@@ -754,6 +769,7 @@
 # CONFIG_ICS932S401 is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
 # CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
 # CONFIG_DS1682 is not set
 # CONFIG_TI_DAC7512 is not set
 # CONFIG_C2PORT is not set
@@ -773,6 +789,7 @@
 #
 # SCSI device support
 #
+CONFIG_SCSI_MOD=y
 # CONFIG_RAID_ATTRS is not set
 CONFIG_SCSI=y
 CONFIG_SCSI_DMA=y
@@ -839,12 +856,14 @@
 # CONFIG_NATIONAL_PHY is not set
 # CONFIG_STE10XP is not set
 # CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
 # CONFIG_FIXED_PHY is not set
 # CONFIG_MDIO_BITBANG is not set
 CONFIG_NET_ETHERNET=y
 CONFIG_MII=y
 # CONFIG_AX88796 is not set
 CONFIG_SMC91X=y
+# CONFIG_TI_DAVINCI_EMAC is not set
 # CONFIG_DM9000 is not set
 # CONFIG_ENC28J60 is not set
 # CONFIG_ETHOC is not set
@@ -881,6 +900,7 @@
 CONFIG_LIBERTAS_SDIO=y
 # CONFIG_LIBERTAS_SPI is not set
 CONFIG_LIBERTAS_DEBUG=y
+# CONFIG_LIBERTAS_MESH is not set
 # CONFIG_P54_COMMON is not set
 # CONFIG_RT2X00 is not set
 # CONFIG_WL12XX is not set
@@ -902,6 +922,7 @@
 CONFIG_USB_NET_CDCETHER=y
 # CONFIG_USB_NET_CDC_EEM is not set
 # CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
 # CONFIG_USB_NET_SMSC95XX is not set
 # CONFIG_USB_NET_GL620A is not set
 CONFIG_USB_NET_NET1080=y
@@ -917,6 +938,8 @@
 CONFIG_USB_KC2190=y
 CONFIG_USB_NET_ZAURUS=y
 # CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_USB_SIERRA_NET is not set
 # CONFIG_WAN is not set
 # CONFIG_PPP is not set
 # CONFIG_SLIP is not set
@@ -1012,6 +1035,7 @@
 # CONFIG_INPUT_YEALINK is not set
 # CONFIG_INPUT_CM109 is not set
 CONFIG_INPUT_TWL4030_PWRBUTTON=y
+# CONFIG_INPUT_TWL4030_VIBRA is not set
 # CONFIG_INPUT_UINPUT is not set
 # CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
 
@@ -1055,6 +1079,7 @@
 # CONFIG_SERIAL_MAX3100 is not set
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
 CONFIG_UNIX98_PTYS=y
 # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 # CONFIG_LEGACY_PTYS is not set
@@ -1083,6 +1108,7 @@
 # CONFIG_I2C_OCORES is not set
 CONFIG_I2C_OMAP=y
 # CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
 
 #
 # External I2C/SMBus adapter drivers
@@ -1096,15 +1122,9 @@
 #
 # CONFIG_I2C_PCA_PLATFORM is not set
 # CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
 # CONFIG_I2C_DEBUG_CORE is not set
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
 CONFIG_SPI=y
 # CONFIG_SPI_DEBUG is not set
 CONFIG_SPI_MASTER=y
@@ -1136,10 +1156,12 @@
 #
 # Memory mapped GPIO expanders:
 #
+# CONFIG_GPIO_IT8761E is not set
 
 #
 # I2C GPIO expanders:
 #
+# CONFIG_GPIO_MAX7300 is not set
 # CONFIG_GPIO_MAX732X is not set
 # CONFIG_GPIO_PCA953X is not set
 # CONFIG_GPIO_PCF857X is not set
@@ -1204,10 +1226,11 @@
 # CONFIG_SENSORS_ADM1029 is not set
 # CONFIG_SENSORS_ADM1031 is not set
 # CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
 # CONFIG_SENSORS_ADT7462 is not set
 # CONFIG_SENSORS_ADT7470 is not set
-# CONFIG_SENSORS_ADT7473 is not set
 # CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
 # CONFIG_SENSORS_ATXP1 is not set
 # CONFIG_SENSORS_DS1621 is not set
 # CONFIG_SENSORS_F71805F is not set
@@ -1262,7 +1285,7 @@
 # CONFIG_SENSORS_LIS3_I2C is not set
 # CONFIG_THERMAL is not set
 CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
 
 #
 # Watchdog Device Drivers
@@ -1270,6 +1293,7 @@
 # CONFIG_SOFT_WATCHDOG is not set
 CONFIG_OMAP_WATCHDOG=y
 CONFIG_TWL4030_WATCHDOG=y
+# CONFIG_MAX63XX_WATCHDOG is not set
 
 #
 # USB-based Watchdog Cards
@@ -1286,14 +1310,16 @@
 # Multifunction device drivers
 #
 CONFIG_MFD_CORE=y
+# CONFIG_MFD_88PM860X is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_MFD_ASIC3 is not set
 # CONFIG_HTC_EGPIO is not set
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
 # CONFIG_TPS65010 is not set
-# CONFIG_MENELAUS is not set
+CONFIG_MENELAUS=y
 CONFIG_TWL4030_CORE=y
-# CONFIG_TWL4030_POWER is not set
+CONFIG_TWL4030_POWER=y
 CONFIG_TWL4030_CODEC=y
 # CONFIG_MFD_TMIO is not set
 # CONFIG_MFD_T7L66XB is not set
@@ -1301,27 +1327,30 @@
 # CONFIG_MFD_TC6393XB is not set
 # CONFIG_PMIC_DA903X is not set
 # CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
 # CONFIG_MFD_WM8400 is not set
 # CONFIG_MFD_WM831X is not set
 # CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
 # CONFIG_MFD_PCF50633 is not set
 # CONFIG_MFD_MC13783 is not set
 # CONFIG_AB3100_CORE is not set
 # CONFIG_EZX_PCAP is not set
-# CONFIG_MFD_88PM8607 is not set
 # CONFIG_AB4500_CORE is not set
 CONFIG_REGULATOR=y
 # CONFIG_REGULATOR_DEBUG is not set
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_DUMMY is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
 # CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
 # CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
 # CONFIG_REGULATOR_BQ24022 is not set
 # CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
 # CONFIG_REGULATOR_MAX8660 is not set
 CONFIG_REGULATOR_TWL4030=y
 # CONFIG_REGULATOR_LP3971 is not set
-# CONFIG_REGULATOR_TPS65023 is not set
-# CONFIG_REGULATOR_TPS6507X is not set
+CONFIG_REGULATOR_TPS65023=y
+CONFIG_REGULATOR_TPS6507X=y
 # CONFIG_MEDIA_SUPPORT is not set
 
 #
@@ -1333,9 +1362,9 @@
 CONFIG_FIRMWARE_EDID=y
 # CONFIG_FB_DDC is not set
 # CONFIG_FB_BOOT_VESA_SUPPORT is not set
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
 # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
 # CONFIG_FB_SYS_FILLRECT is not set
 # CONFIG_FB_SYS_COPYAREA is not set
@@ -1358,19 +1387,12 @@
 # CONFIG_FB_METRONOME is not set
 # CONFIG_FB_MB862XX is not set
 # CONFIG_FB_BROADSHEET is not set
-CONFIG_FB_OMAP=y
+# CONFIG_FB_OMAP is not set
 CONFIG_FB_OMAP_LCD_VGA=y
-# CONFIG_FB_OMAP_031M3R is not set
-# CONFIG_FB_OMAP_048M3R is not set
-CONFIG_FB_OMAP_079M3R=y
-# CONFIG_FB_OMAP_092M9R is not set
-# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set
-# CONFIG_FB_OMAP_LCD_MIPID is not set
-# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
-CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=2
 # CONFIG_OMAP2_DSS is not set
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_L4F00242T03 is not set
 # CONFIG_LCD_LMS283GF05 is not set
 # CONFIG_LCD_LTV350QV is not set
 # CONFIG_LCD_ILI9320 is not set
@@ -1448,6 +1470,7 @@
 CONFIG_SND_SPI=y
 CONFIG_SND_USB=y
 CONFIG_SND_USB_AUDIO=y
+# CONFIG_SND_USB_UA101 is not set
 # CONFIG_SND_USB_CAIAQ is not set
 CONFIG_SND_SOC=y
 CONFIG_SND_OMAP_SOC=y
@@ -1479,6 +1502,7 @@
 #
 # Special HID drivers
 #
+# CONFIG_HID_3M_PCT is not set
 # CONFIG_HID_A4TECH is not set
 # CONFIG_HID_APPLE is not set
 # CONFIG_HID_BELKIN is not set
@@ -1492,13 +1516,18 @@
 # CONFIG_HID_TWINHAN is not set
 # CONFIG_HID_KENSINGTON is not set
 # CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MAGICMOUSE is not set
 # CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
 # CONFIG_HID_MONTEREY is not set
 # CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
 # CONFIG_HID_PANTHERLORD is not set
 # CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
 # CONFIG_HID_SAMSUNG is not set
 # CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
 # CONFIG_HID_SUNPLUS is not set
 # CONFIG_HID_GREENASIA is not set
 # CONFIG_HID_SMARTJOYPLUS is not set
@@ -1545,6 +1574,10 @@
 CONFIG_USB_MUSB_SOC=y
 
 #
+# OMAP 243x high speed USB support
+#
+
+#
 # OMAP 343x high speed USB support
 #
 # CONFIG_USB_MUSB_HOST is not set
@@ -1608,7 +1641,6 @@
 # CONFIG_USB_RIO500 is not set
 # CONFIG_USB_LEGOTOWER is not set
 # CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
 # CONFIG_USB_LED is not set
 # CONFIG_USB_CYPRESS_CY7C63 is not set
 # CONFIG_USB_CYTHERM is not set
@@ -1621,7 +1653,6 @@
 # CONFIG_USB_IOWARRIOR is not set
 CONFIG_USB_TEST=y
 # CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
@@ -1659,6 +1690,7 @@
 # CONFIG_USB_MIDI_GADGET is not set
 # CONFIG_USB_G_PRINTER is not set
 # CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
 # CONFIG_USB_G_MULTI is not set
 
 #
@@ -1686,10 +1718,8 @@
 # MMC/SD/SDIO Host Controller Drivers
 #
 # CONFIG_MMC_SDHCI is not set
-# CONFIG_MMC_OMAP is not set
+CONFIG_MMC_OMAP=y
 CONFIG_MMC_OMAP_HS=y
-# CONFIG_MMC_AT91 is not set
-# CONFIG_MMC_ATMELMCI is not set
 # CONFIG_MMC_SPI is not set
 # CONFIG_MEMSTICK is not set
 CONFIG_NEW_LEDS=y
@@ -1707,11 +1737,11 @@
 # CONFIG_LEDS_REGULATOR is not set
 # CONFIG_LEDS_BD2802 is not set
 # CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
 
 #
 # LED Triggers
 #
-CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 # CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
@@ -1751,6 +1781,7 @@
 # CONFIG_RTC_DRV_PCF8583 is not set
 # CONFIG_RTC_DRV_M41T80 is not set
 # CONFIG_RTC_DRV_BQ32K is not set
+CONFIG_RTC_DRV_TWL92330=y
 CONFIG_RTC_DRV_TWL4030=y
 # CONFIG_RTC_DRV_S35390A is not set
 # CONFIG_RTC_DRV_FM3130 is not set
@@ -1799,6 +1830,11 @@
 # CONFIG_STAGING is not set
 
 #
+# CBUS support
+#
+# CONFIG_CBUS is not set
+
+#
 # File systems
 #
 CONFIG_EXT2_FS=y
@@ -1826,6 +1862,7 @@
 CONFIG_QUOTA=y
 # CONFIG_QUOTA_NETLINK_INTERFACE is not set
 CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
 CONFIG_QUOTA_TREE=y
 # CONFIG_QFMT_V1 is not set
 CONFIG_QFMT_V2=y
@@ -1897,6 +1934,7 @@
 CONFIG_UBIFS_FS_LZO=y
 CONFIG_UBIFS_FS_ZLIB=y
 # CONFIG_UBIFS_FS_DEBUG is not set
+# CONFIG_LOGFS is not set
 CONFIG_CRAMFS=y
 # CONFIG_SQUASHFS is not set
 # CONFIG_VXFS_FS is not set
@@ -1924,6 +1962,7 @@
 CONFIG_RPCSEC_GSS_KRB5=y
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 # CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
 # CONFIG_CIFS is not set
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
@@ -2024,6 +2063,7 @@
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=y
+# CONFIG_PROVE_RCU is not set
 CONFIG_LOCKDEP=y
 CONFIG_LOCK_STAT=y
 # CONFIG_DEBUG_LOCKDEP is not set
@@ -2053,13 +2093,9 @@
 # CONFIG_LATENCYTOP is not set
 # CONFIG_SYSCTL_SYSCALL_CHECK is not set
 # CONFIG_PAGE_POISONING is not set
-CONFIG_NOP_TRACER=y
 CONFIG_HAVE_FUNCTION_TRACER=y
 CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
 CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
 CONFIG_TRACING_SUPPORT=y
 CONFIG_FTRACE=y
 # CONFIG_FUNCTION_TRACER is not set
@@ -2199,7 +2235,7 @@
 #
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRYPTO_HW=y
-CONFIG_BINARY_PRINTF=y
+# CONFIG_BINARY_PRINTF is not set
 
 #
 # Library routines
@@ -2222,3 +2258,4 @@
 CONFIG_HAS_IOPORT=y
 CONFIG_HAS_DMA=y
 CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/arm/configs/omap3_evm_defconfig b/arch/arm/configs/omap3_evm_defconfig
index a6dd6d1..b02e371 100644
--- a/arch/arm/configs/omap3_evm_defconfig
+++ b/arch/arm/configs/omap3_evm_defconfig
@@ -911,7 +911,56 @@
 #
 # CONFIG_VGASTATE is not set
 CONFIG_VIDEO_OUTPUT_CONTROL=m
-# CONFIG_FB is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=4
+# CONFIG_OMAP2_DSS_DEBUG_SUPPORT is not set
+# CONFIG_OMAP2_DSS_RFBI is not set
+CONFIG_OMAP2_DSS_VENC=y
+# CONFIG_OMAP2_DSS_SDI is not set
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4
+CONFIG_FB_OMAP2=y
+# CONFIG_FB_OMAP2_DEBUG_SUPPORT is not set
+# CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE is not set
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+CONFIG_PANEL_GENERIC=y
+# CONFIG_PANEL_SAMSUNG_LTE430WQ_F0C is not set
+CONFIG_PANEL_SHARP_LS037V7DW01=y
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
diff --git a/arch/arm/configs/omap3_stalker_lks_defconfig b/arch/arm/configs/omap3_stalker_lks_defconfig
new file mode 100644
index 0000000..83365f0
--- /dev/null
+++ b/arch/arm/configs/omap3_stalker_lks_defconfig
@@ -0,0 +1,1691 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.34-rc7
+# Mon May 17 16:57:28 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_ARM_L1_CACHE_SHIFT_6=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+CONFIG_ARCH_OMAP=y
+
+#
+# TI OMAP Implementations
+#
+CONFIG_ARCH_OMAP_OTG=y
+# CONFIG_ARCH_OMAP1 is not set
+CONFIG_ARCH_OMAP2PLUS=y
+# CONFIG_ARCH_OMAP2 is not set
+CONFIG_ARCH_OMAP3=y
+# CONFIG_ARCH_OMAP4 is not set
+
+#
+# OMAP Feature Selections
+#
+CONFIG_OMAP_RESET_CLOCKS=y
+CONFIG_OMAP_MUX=y
+# CONFIG_OMAP_MUX_DEBUG is not set
+CONFIG_OMAP_MUX_WARNINGS=y
+# CONFIG_OMAP_MCBSP is not set
+# CONFIG_OMAP_MBOX_FWK is not set
+# CONFIG_OMAP_MPU_TIMER is not set
+CONFIG_OMAP_32K_TIMER=y
+# CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE is not set
+CONFIG_OMAP_32K_TIMER_HZ=128
+CONFIG_OMAP_DM_TIMER=y
+# CONFIG_OMAP_PM_NONE is not set
+CONFIG_OMAP_PM_NOOP=y
+CONFIG_ARCH_OMAP3430=y
+CONFIG_OMAP_PACKAGE_CUS=y
+
+#
+# OMAP Board Type
+#
+# CONFIG_MACH_OMAP3_BEAGLE is not set
+# CONFIG_MACH_DEVKIT8000 is not set
+# CONFIG_MACH_OMAP_LDP is not set
+# CONFIG_MACH_OVERO is not set
+# CONFIG_MACH_OMAP3EVM is not set
+# CONFIG_MACH_OMAP3517EVM is not set
+# CONFIG_MACH_OMAP3_PANDORA is not set
+# CONFIG_MACH_OMAP3_TOUCHBOOK is not set
+# CONFIG_MACH_OMAP_3430SDP is not set
+# CONFIG_MACH_NOKIA_RX51 is not set
+# CONFIG_MACH_OMAP_ZOOM2 is not set
+# CONFIG_MACH_OMAP_ZOOM3 is not set
+# CONFIG_MACH_CM_T35 is not set
+# CONFIG_MACH_IGEP0020 is not set
+CONFIG_MACH_SBC3530=y
+# CONFIG_MACH_OMAP_3630SDP is not set
+# CONFIG_OMAP3_EMU is not set
+# CONFIG_OMAP3_SDRC_AC_TIMING is not set
+
+#
+# Processor Type
+#
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_ARM_THUMBEE is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_HAS_TLS_REG=y
+CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_CPU_HAS_PMU=y
+# CONFIG_ARM_ERRATA_430973 is not set
+# CONFIG_ARM_ERRATA_458693 is not set
+# CONFIG_ARM_ERRATA_460075 is not set
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=128
+# CONFIG_THUMB2_KERNEL is not set
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_LEDS is not set
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=/dev/nfs nfsroot=192.168.0.1:/home/user/buildroot ip=192.168.0.2:192.168.0.1:192.168.0.1:255.255.255.0:tgt:eth0:off rw console=ttyS2,115200n8"
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_FREQ is not set
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_NEON=y
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+# CONFIG_PM_ADVANCED_DEBUG is not set
+# CONFIG_PM_VERBOSE is not set
+CONFIG_CAN_PM_TRACE=y
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_GPIO is not set
+# CONFIG_MTD_NAND_OMAP2 is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ALAUDA is not set
+CONFIG_MTD_ONENAND=y
+CONFIG_MTD_ONENAND_VERIFY_WRITE=y
+# CONFIG_MTD_ONENAND_GENERIC is not set
+CONFIG_MTD_ONENAND_OMAP2=y
+# CONFIG_MTD_ONENAND_OTP is not set
+# CONFIG_MTD_ONENAND_2X_PROGRAM is not set
+# CONFIG_MTD_ONENAND_SIM is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_TI_DAVINCI_EMAC is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+CONFIG_SMSC911X=y
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_HOSTAP is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+CONFIG_KEYBOARD_TWL4030=y
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=y
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_OMAP=y
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_OMAP24XX=y
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
+# CONFIG_GPIO_IT8761E is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+CONFIG_GPIO_TWL4030=y
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_OMAP_WATCHDOG=y
+# CONFIG_TWL4030_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_TPS65010 is not set
+CONFIG_TWL4030_CORE=y
+# CONFIG_TWL4030_POWER is not set
+# CONFIG_TWL4030_CODEC is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_AB4500_CORE is not set
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+CONFIG_REGULATOR_TWL4030=y
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+# CONFIG_FB is not set
+# CONFIG_OMAP2_DSS is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_HID_PID is not set
+# CONFIG_USB_HIDDEV is not set
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_3M_PCT is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_ZEROPLUS is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_EHCI_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SOC=y
+
+#
+# OMAP 343x high speed USB support
+#
+# CONFIG_USB_MUSB_HOST is not set
+# CONFIG_USB_MUSB_PERIPHERAL is not set
+CONFIG_USB_MUSB_OTG=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_USB_MUSB_HDRC_HCD=y
+# CONFIG_MUSB_PIO_ONLY is not set
+CONFIG_USB_INVENTRA_DMA=y
+# CONFIG_USB_TI_CPPI_DMA is not set
+# CONFIG_USB_MUSB_DEBUG is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+CONFIG_USB_TEST=y
+# CONFIG_USB_ISIGHTFW is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+CONFIG_USB_ZERO=m
+# CONFIG_USB_ZERO_HNPTEST is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_MULTI is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_ISP1301_OMAP is not set
+# CONFIG_USB_ULPI is not set
+CONFIG_TWL4030_USB=y
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_OMAP is not set
+CONFIG_MMC_OMAP_HS=y
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_QUOTA=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARM_UNWIND=y
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_LL=y
+# CONFIG_EARLY_PRINTK is not set
+# CONFIG_DEBUG_ICEDCC is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=y
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/configs/omap_4430sdp_defconfig b/arch/arm/configs/omap_4430sdp_defconfig
index a96bca2..1fb0456 100644
--- a/arch/arm/configs/omap_4430sdp_defconfig
+++ b/arch/arm/configs/omap_4430sdp_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.32
-# Sun Dec  6 23:37:45 2009
+# Linux kernel version: 2.6.34-rc7
+# Wed May 12 12:26:05 2010
 #
 CONFIG_ARM=y
 CONFIG_SYS_SUPPORTS_APM_EMULATION=y
@@ -9,6 +9,7 @@
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_HAVE_PROC_CPU=y
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_STACKTRACE_SUPPORT=y
 CONFIG_LOCKDEP_SUPPORT=y
@@ -20,6 +21,7 @@
 CONFIG_ARCH_HAS_CPUFREQ=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
 CONFIG_VECTORS_BASE=0xffff0000
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
@@ -33,28 +35,33 @@
 CONFIG_INIT_ENV_ARG_LIMIT=32
 CONFIG_LOCALVERSION=""
 CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
 CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
 CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
 CONFIG_BSD_PROCESS_ACCT=y
 # CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
 
 #
 # RCU Subsystem
 #
 CONFIG_TREE_RCU=y
 # CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
 # CONFIG_RCU_TRACE is not set
 CONFIG_RCU_FANOUT=32
 # CONFIG_RCU_FANOUT_EXACT is not set
 # CONFIG_TREE_RCU_TRACE is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
 # CONFIG_CGROUPS is not set
 # CONFIG_SYSFS_DEPRECATED_V2 is not set
 # CONFIG_RELAY is not set
@@ -64,6 +71,7 @@
 CONFIG_RD_GZIP=y
 # CONFIG_RD_BZIP2 is not set
 # CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
 CONFIG_ANON_INODES=y
@@ -85,10 +93,14 @@
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
 CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
 
 #
 # Kernel Performance Events And Counters
 #
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_SLUB_DEBUG=y
 CONFIG_COMPAT_BRK=y
@@ -127,14 +139,41 @@
 # IO Schedulers
 #
 CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
 CONFIG_IOSCHED_DEADLINE=y
 CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
 # CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
 # CONFIG_FREEZER is not set
 
 #
@@ -146,6 +185,7 @@
 # CONFIG_ARCH_REALVIEW is not set
 # CONFIG_ARCH_VERSATILE is not set
 # CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
 # CONFIG_ARCH_CLPS711X is not set
 # CONFIG_ARCH_GEMINI is not set
 # CONFIG_ARCH_EBSA110 is not set
@@ -155,7 +195,6 @@
 # CONFIG_ARCH_STMP3XXX is not set
 # CONFIG_ARCH_NETX is not set
 # CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_NOMADIK is not set
 # CONFIG_ARCH_IOP13XX is not set
 # CONFIG_ARCH_IOP32X is not set
 # CONFIG_ARCH_IOP33X is not set
@@ -163,6 +202,7 @@
 # CONFIG_ARCH_IXP2000 is not set
 # CONFIG_ARCH_IXP4XX is not set
 # CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
 # CONFIG_ARCH_KIRKWOOD is not set
 # CONFIG_ARCH_LOKI is not set
 # CONFIG_ARCH_MV78XX0 is not set
@@ -171,25 +211,32 @@
 # CONFIG_ARCH_KS8695 is not set
 # CONFIG_ARCH_NS9XXX is not set
 # CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
 # CONFIG_ARCH_PNX4008 is not set
 # CONFIG_ARCH_PXA is not set
 # CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
 # CONFIG_ARCH_RPC is not set
 # CONFIG_ARCH_SA1100 is not set
 # CONFIG_ARCH_S3C2410 is not set
 # CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
 # CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5PV210 is not set
 # CONFIG_ARCH_SHARK is not set
 # CONFIG_ARCH_LH7A40X is not set
 # CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
 # CONFIG_ARCH_DAVINCI is not set
 CONFIG_ARCH_OMAP=y
-# CONFIG_ARCH_BCMRING is not set
 
 #
 # TI OMAP Implementations
 #
 # CONFIG_ARCH_OMAP1 is not set
+CONFIG_ARCH_OMAP2PLUS=y
 # CONFIG_ARCH_OMAP2 is not set
 # CONFIG_ARCH_OMAP3 is not set
 CONFIG_ARCH_OMAP4=y
@@ -205,10 +252,6 @@
 CONFIG_OMAP_32K_TIMER=y
 CONFIG_OMAP_32K_TIMER_HZ=128
 CONFIG_OMAP_DM_TIMER=y
-# CONFIG_OMAP_LL_DEBUG_UART1 is not set
-# CONFIG_OMAP_LL_DEBUG_UART2 is not set
-CONFIG_OMAP_LL_DEBUG_UART3=y
-# CONFIG_OMAP_LL_DEBUG_NONE is not set
 # CONFIG_OMAP_PM_NONE is not set
 CONFIG_OMAP_PM_NOOP=y
 
@@ -243,13 +286,16 @@
 # CONFIG_CPU_BPREDICT_DISABLE is not set
 CONFIG_HAS_TLS_REG=y
 CONFIG_OUTER_CACHE=y
+CONFIG_OUTER_CACHE_SYNC=y
 CONFIG_CACHE_L2X0=y
 CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_CPU_HAS_PMU=y
 # CONFIG_ARM_ERRATA_430973 is not set
 # CONFIG_ARM_ERRATA_458693 is not set
 # CONFIG_ARM_ERRATA_460075 is not set
 CONFIG_PL310_ERRATA_588369=y
 CONFIG_ARM_GIC=y
+CONFIG_COMMON_CLKDEV=y
 
 #
 # Bus support
@@ -280,6 +326,7 @@
 # CONFIG_THUMB2_KERNEL is not set
 CONFIG_AEABI=y
 CONFIG_OABI_COMPAT=y
+CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
 # CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
 # CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
 # CONFIG_HIGHMEM is not set
@@ -294,8 +341,6 @@
 # CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=0
 CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
 # CONFIG_KSM is not set
 CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
 # CONFIG_LEDS is not set
@@ -343,7 +388,83 @@
 #
 # CONFIG_PM is not set
 CONFIG_ARCH_SUSPEND_POSSIBLE=y
-# CONFIG_NET is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_UNIX is not set
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
 
 #
 # Device Drivers
@@ -360,17 +481,24 @@
 # CONFIG_DEBUG_DRIVER is not set
 # CONFIG_DEBUG_DEVRES is not set
 # CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
 # CONFIG_MTD is not set
 # CONFIG_PARPORT is not set
 CONFIG_BLK_DEV=y
 # CONFIG_BLK_DEV_COW_COMMON is not set
 CONFIG_BLK_DEV_LOOP=y
 # CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=16384
 # CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
 # CONFIG_MG_DISK is not set
 # CONFIG_MISC_DEVICES is not set
 CONFIG_HAVE_IDE=y
@@ -379,12 +507,56 @@
 #
 # SCSI device support
 #
+CONFIG_SCSI_MOD=y
 # CONFIG_RAID_ATTRS is not set
 # CONFIG_SCSI is not set
 # CONFIG_SCSI_DMA is not set
 # CONFIG_SCSI_NETLINK is not set
 # CONFIG_ATA is not set
 # CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+CONFIG_KS8851=y
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
 # CONFIG_PHONE is not set
 
 #
@@ -393,6 +565,7 @@
 CONFIG_INPUT=y
 # CONFIG_INPUT_FF_MEMLESS is not set
 # CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
 
 #
 # Userland interfaces
@@ -445,8 +618,10 @@
 #
 # Non-8250 serial port support
 #
+# CONFIG_SERIAL_MAX3100 is not set
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
 CONFIG_UNIX98_PTYS=y
 # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 # CONFIG_LEGACY_PTYS is not set
@@ -456,8 +631,58 @@
 # CONFIG_R3964 is not set
 # CONFIG_RAW_DRIVER is not set
 # CONFIG_TCG_TPM is not set
-# CONFIG_I2C is not set
-# CONFIG_SPI is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_OMAP=y
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_OMAP24XX=y
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
 
 #
 # PPS support
@@ -471,10 +696,17 @@
 #
 # Memory mapped GPIO expanders:
 #
+# CONFIG_GPIO_IT8761E is not set
 
 #
 # I2C GPIO expanders:
 #
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_TWL4030 is not set
+# CONFIG_GPIO_ADP5588 is not set
 
 #
 # PCI GPIO expanders:
@@ -483,6 +715,9 @@
 #
 # SPI GPIO expanders:
 #
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
 
 #
 # AC97 GPIO expanders:
@@ -492,7 +727,15 @@
 # CONFIG_HWMON is not set
 # CONFIG_THERMAL is not set
 CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
 CONFIG_OMAP_WATCHDOG=y
+# CONFIG_TWL4030_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
 CONFIG_SSB_POSSIBLE=y
 
 #
@@ -504,15 +747,46 @@
 # Multifunction device drivers
 #
 # CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_MFD_ASIC3 is not set
 # CONFIG_HTC_EGPIO is not set
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_TPS65010 is not set
+CONFIG_TWL4030_CORE=y
+# CONFIG_TWL4030_POWER is not set
+# CONFIG_TWL4030_CODEC is not set
 # CONFIG_MFD_TMIO is not set
 # CONFIG_MFD_T7L66XB is not set
 # CONFIG_MFD_TC6387XB is not set
 # CONFIG_MFD_TC6393XB is not set
-# CONFIG_REGULATOR is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_AB4500_CORE is not set
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+CONFIG_REGULATOR_TWL4030=y
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
 # CONFIG_MEDIA_SUPPORT is not set
 
 #
@@ -536,12 +810,94 @@
 # CONFIG_SOUND is not set
 # CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
-# CONFIG_MMC is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_OMAP is not set
+CONFIG_MMC_OMAP_HS=y
 # CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
 # CONFIG_ACCESSIBILITY is not set
 CONFIG_RTC_LIB=y
-# CONFIG_RTC_CLASS is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+CONFIG_RTC_DRV_TWL4030=y
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
 # CONFIG_DMADEVICES is not set
 # CONFIG_AUXDISPLAY is not set
 # CONFIG_UIO is not set
@@ -564,9 +920,10 @@
 CONFIG_JBD=y
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
-# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FS_POSIX_ACL=y
 # CONFIG_XFS_FS is not set
 # CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
 # CONFIG_BTRFS_FS is not set
 # CONFIG_NILFS2_FS is not set
 CONFIG_FILE_LOCKING=y
@@ -575,7 +932,9 @@
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 CONFIG_QUOTA=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
 CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
 CONFIG_QUOTA_TREE=y
 # CONFIG_QFMT_V1 is not set
 CONFIG_QFMT_V2=y
@@ -624,6 +983,7 @@
 # CONFIG_BEFS_FS is not set
 # CONFIG_BFS_FS is not set
 # CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
 # CONFIG_CRAMFS is not set
 # CONFIG_SQUASHFS is not set
 # CONFIG_VXFS_FS is not set
@@ -634,6 +994,28 @@
 # CONFIG_ROMFS_FS is not set
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
 
 #
 # Partition Types
@@ -696,6 +1078,7 @@
 # CONFIG_NLS_KOI8_R is not set
 # CONFIG_NLS_KOI8_U is not set
 # CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
 
 #
 # Kernel hacking
@@ -750,13 +1133,11 @@
 # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
 # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
 # CONFIG_FAULT_INJECTION is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
 # CONFIG_PAGE_POISONING is not set
 CONFIG_HAVE_FUNCTION_TRACER=y
 CONFIG_TRACING_SUPPORT=y
 # CONFIG_FTRACE is not set
-# CONFIG_BRANCH_PROFILE_NONE is not set
-# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
-# CONFIG_PROFILE_ALL_BRANCHES is not set
 # CONFIG_SAMPLES is not set
 CONFIG_HAVE_ARCH_KGDB=y
 # CONFIG_KGDB is not set
@@ -765,6 +1146,7 @@
 # CONFIG_DEBUG_ERRORS is not set
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_LL is not set
+# CONFIG_OC_ETM is not set
 
 #
 # Security options
@@ -772,7 +1154,11 @@
 # CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
 # CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
 CONFIG_CRYPTO=y
 
 #
@@ -791,6 +1177,7 @@
 CONFIG_CRYPTO_MANAGER2=y
 # CONFIG_CRYPTO_GF128MUL is not set
 # CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_PCRYPT is not set
 CONFIG_CRYPTO_WORKQUEUE=y
 # CONFIG_CRYPTO_CRYPTD is not set
 # CONFIG_CRYPTO_AUTHENC is not set
@@ -889,3 +1276,4 @@
 CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
 CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/configs/rx51_defconfig b/arch/arm/configs/rx51_defconfig
index 473f9e1..56d4928 100644
--- a/arch/arm/configs/rx51_defconfig
+++ b/arch/arm/configs/rx51_defconfig
@@ -784,6 +784,7 @@
 # CONFIG_KEYBOARD_NEWTON is not set
 # CONFIG_KEYBOARD_STOWAWAY is not set
 CONFIG_KEYBOARD_GPIO=m
+CONFIG_KEYBOARD_TWL4030=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_INPUT_JOYSTICK is not set
 # CONFIG_INPUT_TABLET is not set
@@ -809,6 +810,7 @@
 # CONFIG_INPUT_POWERMATE is not set
 # CONFIG_INPUT_YEALINK is not set
 # CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_TWL4030_PWRBUTTON=y
 CONFIG_INPUT_UINPUT=m
 
 #
@@ -1110,7 +1112,40 @@
 #
 # CONFIG_VGASTATE is not set
 # CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
+CONFIG_FB=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+
+# Frame buffer hardware drivers
+#
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=0
+# CONFIG_OMAP2_DSS_DEBUG_SUPPORT is not set
+# CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS is not set
+# CONFIG_OMAP2_DSS_DPI is not set
+# CONFIG_OMAP2_DSS_RFBI is not set
+# CONFIG_OMAP2_DSS_VENC is not set
+CONFIG_OMAP2_DSS_SDI=y
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+# CONFIG_PANEL_GENERIC is not set
+# CONFIG_PANEL_SHARP_LS037V7DW01 is not set
+# CONFIG_PANEL_SHARP_LQ043T1DG01 is not set
+# CONFIG_PANEL_TOPPOLY_TDO35S is not set
+# CONFIG_PANEL_TPO_TD043MTEA1 is not set
+CONFIG_PANEL_ACX565AKM=y
+
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
@@ -1127,6 +1162,8 @@
 #
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
 CONFIG_SOUND=y
 # CONFIG_SOUND_OSS_CORE is not set
 CONFIG_SND=y
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c4b2ea3..e51b1e8 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -20,6 +20,7 @@
 	KM_SOFTIRQ1,
 	KM_L1_CACHE,
 	KM_L2_CACHE,
+	KM_KDB,
 	KM_TYPE_NR
 };
 
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index a5b846b..c868a88 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -98,6 +98,11 @@
 	gdb_regs[_CPSR]		= thread_regs->ARM_cpsr;
 }
 
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+	regs->ARM_pc = pc;
+}
+
 static int compiled_break;
 
 int kgdb_arch_handle_exception(int exception_vector, int signo,
diff --git a/arch/arm/mach-msm/dma.c b/arch/arm/mach-msm/dma.c
index 3d725ae..d029d1f 100644
--- a/arch/arm/mach-msm/dma.c
+++ b/arch/arm/mach-msm/dma.c
@@ -69,6 +69,8 @@
 			writel(DMOV_CONFIG_IRQ_EN, DMOV_CONFIG(id));
 		}
 #endif
+		if (cmd->execute_func)
+			cmd->execute_func(cmd);
 		PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n", id, status);
 		list_add_tail(&cmd->list, &active_commands[id]);
 		if (!channel_active)
@@ -116,6 +118,7 @@
 
 	cmd.dmov_cmd.cmdptr = cmdptr;
 	cmd.dmov_cmd.complete_func = dmov_exec_cmdptr_complete_func;
+	cmd.dmov_cmd.execute_func = NULL;
 	cmd.id = id;
 	init_completion(&cmd.complete);
 
@@ -221,6 +224,8 @@
 				cmd = list_entry(ready_commands[id].next, typeof(*cmd), list);
 				list_del(&cmd->list);
 				list_add_tail(&cmd->list, &active_commands[id]);
+				if (cmd->execute_func)
+					cmd->execute_func(cmd);
 				PRINT_FLOW("msm_datamover_irq_handler id %d, start command\n", id);
 				writel(cmd->cmdptr, DMOV_CMD_PTR(id));
 			}
diff --git a/arch/arm/mach-msm/include/mach/dma.h b/arch/arm/mach-msm/include/mach/dma.h
index 04c51cc..00f9bbf 100644
--- a/arch/arm/mach-msm/include/mach/dma.h
+++ b/arch/arm/mach-msm/include/mach/dma.h
@@ -28,6 +28,8 @@
 	void (*complete_func)(struct msm_dmov_cmd *cmd,
 			      unsigned int result,
 			      struct msm_dmov_errdata *err);
+	void (*execute_func)(struct msm_dmov_cmd *cmd);
+	void *data;
 };
 
 void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd);
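
As a usage sketch for the new execute_func hook and data cookie added above (the sdcc_* names are illustrative, not taken from this series), a data mover client might wire it up like this; msm_dmov_enqueue_cmd() then calls execute_func at the moment the command is handed to the hardware, and callers that do not need it can simply leave the field NULL since the core checks it before calling:

#include <mach/dma.h>

static void sdcc_execute(struct msm_dmov_cmd *cmd)
{
	/* Runs when the data mover actually starts this command,
	 * both on the enqueue path and in the IRQ handler above. */
}

static void sdcc_complete(struct msm_dmov_cmd *cmd, unsigned int result,
			  struct msm_dmov_errdata *err)
{
	/* Usual completion callback, unchanged by this patch. */
}

static struct msm_dmov_cmd sdcc_cmd = {
	.complete_func	= sdcc_complete,
	.execute_func	= sdcc_execute,		/* new hook */
	.data		= NULL,			/* new per-command cookie */
};
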
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index 27f4897..b18d7c2 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -152,6 +152,16 @@
 	  Support for the Amstrad E3 (codename Delta) videophone. Say Y here
 	  if you have such a device.
 
+config AMS_DELTA_FIQ
+	bool "Fast Interrupt Request (FIQ) support for the E3"
+	depends on MACH_AMS_DELTA
+	select FIQ
+	help
+	  Provide a FIQ handler for the E3.
+	  This allows for fast handling of interrupts generated
+	  by the clock line of the E3 mailboard (or a PS/2 keyboard)
+	  connected to the GPIO-based external keyboard port.
+
 config MACH_OMAP_GENERIC
 	bool "Generic OMAP board"
 	depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX)
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index b6a537c..ea231c7 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -37,6 +37,7 @@
 obj-$(CONFIG_MACH_OMAP_PALMTT)		+= board-palmtt.o
 obj-$(CONFIG_MACH_NOKIA770)		+= board-nokia770.o
 obj-$(CONFIG_MACH_AMS_DELTA)		+= board-ams-delta.o
+obj-$(CONFIG_AMS_DELTA_FIQ)		+= ams-delta-fiq.o ams-delta-fiq-handler.o
 obj-$(CONFIG_MACH_SX1)			+= board-sx1.o board-sx1-mmc.o
 obj-$(CONFIG_MACH_HERALD)		+= board-htcherald.o
 
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
new file mode 100644
index 0000000..927d5a1
--- /dev/null
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -0,0 +1,278 @@
+/*
+ *  linux/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+ *
+ *  Based on  linux/arch/arm/lib/floppydma.S
+ *  Renamed and modified to work with 2.6 kernel by Matt Callow
+ *  Copyright (C) 1995, 1996 Russell King
+ *  Copyright (C) 2004 Pete Trapps
+ *  Copyright (C) 2006 Matt Callow
+ *  Copyright (C) 2010 Janusz Krzysztofik
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+#include <plat/io.h>
+#include <plat/board-ams-delta.h>
+
+#include <mach/ams-delta-fiq.h>
+
+/*
+ * GPIO related definitions, copied from arch/arm/plat-omap/gpio.c.
+ * Unfortunately, those were not placed in a separate header file.
+ */
+#define OMAP1510_GPIO_BASE		0xFFFCE000
+#define OMAP1510_GPIO_DATA_INPUT	0x00
+#define OMAP1510_GPIO_DATA_OUTPUT	0x04
+#define OMAP1510_GPIO_DIR_CONTROL	0x08
+#define OMAP1510_GPIO_INT_CONTROL	0x0c
+#define OMAP1510_GPIO_INT_MASK		0x10
+#define OMAP1510_GPIO_INT_STATUS	0x14
+#define OMAP1510_GPIO_PIN_CONTROL	0x18
+
+/* GPIO register bitmasks */
+#define KEYBRD_DATA_MASK		(0x1 << AMS_DELTA_GPIO_PIN_KEYBRD_DATA)
+#define KEYBRD_CLK_MASK			(0x1 << AMS_DELTA_GPIO_PIN_KEYBRD_CLK)
+#define MODEM_IRQ_MASK			(0x1 << AMS_DELTA_GPIO_PIN_MODEM_IRQ)
+#define HOOK_SWITCH_MASK		(0x1 << AMS_DELTA_GPIO_PIN_HOOK_SWITCH)
+#define OTHERS_MASK			(MODEM_IRQ_MASK | HOOK_SWITCH_MASK)
+
+/* IRQ handler register bitmasks */
+#define DEFERRED_FIQ_MASK		(0x1 << (INT_DEFERRED_FIQ % IH2_BASE))
+#define GPIO_BANK1_MASK  		(0x1 << INT_GPIO_BANK1)
+
+/* Driver buffer byte offsets */
+#define BUF_MASK			(FIQ_MASK * 4)
+#define BUF_STATE			(FIQ_STATE * 4)
+#define BUF_KEYS_CNT			(FIQ_KEYS_CNT * 4)
+#define BUF_TAIL_OFFSET			(FIQ_TAIL_OFFSET * 4)
+#define BUF_HEAD_OFFSET			(FIQ_HEAD_OFFSET * 4)
+#define BUF_BUF_LEN			(FIQ_BUF_LEN * 4)
+#define BUF_KEY				(FIQ_KEY * 4)
+#define BUF_MISSED_KEYS			(FIQ_MISSED_KEYS * 4)
+#define BUF_BUFFER_START		(FIQ_BUFFER_START * 4)
+#define BUF_GPIO_INT_MASK		(FIQ_GPIO_INT_MASK * 4)
+#define BUF_KEYS_HICNT			(FIQ_KEYS_HICNT * 4)
+#define BUF_IRQ_PEND			(FIQ_IRQ_PEND * 4)
+#define BUF_SIR_CODE_L1			(FIQ_SIR_CODE_L1 * 4)
+#define BUF_SIR_CODE_L2			(IRQ_SIR_CODE_L2 * 4)
+#define BUF_CNT_INT_00			(FIQ_CNT_INT_00 * 4)
+#define BUF_CNT_INT_KEY			(FIQ_CNT_INT_KEY * 4)
+#define BUF_CNT_INT_MDM			(FIQ_CNT_INT_MDM * 4)
+#define BUF_CNT_INT_03			(FIQ_CNT_INT_03 * 4)
+#define BUF_CNT_INT_HSW			(FIQ_CNT_INT_HSW * 4)
+#define BUF_CNT_INT_05			(FIQ_CNT_INT_05 * 4)
+#define BUF_CNT_INT_06			(FIQ_CNT_INT_06 * 4)
+#define BUF_CNT_INT_07			(FIQ_CNT_INT_07 * 4)
+#define BUF_CNT_INT_08			(FIQ_CNT_INT_08 * 4)
+#define BUF_CNT_INT_09			(FIQ_CNT_INT_09 * 4)
+#define BUF_CNT_INT_10			(FIQ_CNT_INT_10 * 4)
+#define BUF_CNT_INT_11			(FIQ_CNT_INT_11 * 4)
+#define BUF_CNT_INT_12			(FIQ_CNT_INT_12 * 4)
+#define BUF_CNT_INT_13			(FIQ_CNT_INT_13 * 4)
+#define BUF_CNT_INT_14			(FIQ_CNT_INT_14 * 4)
+#define BUF_CNT_INT_15			(FIQ_CNT_INT_15 * 4)
+#define BUF_CIRC_BUFF			(FIQ_CIRC_BUFF * 4)
+
+
+/*
+ * Register usage
+ * r8  - temporary
+ * r9  - the driver buffer
+ * r10 - temporary
+ * r11 - interrupts mask
+ * r12 - base pointers
+ * r13 - interrupts status
+ */
+
+	.text
+
+	.global qwerty_fiqin_end
+
+ENTRY(qwerty_fiqin_start)
+	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+	@ FIQ interrupt handler
+	ldr r12, omap_ih1_base			@ set pointer to level1 handler
+
+	ldr r11, [r12, #IRQ_MIR_REG_OFFSET]	@ fetch interrupts mask
+
+	ldr r13, [r12, #IRQ_ITR_REG_OFFSET]	@ fetch interrupts status
+	bics r13, r13, r11			@ clear masked - any left?
+	beq exit				@ none - spurious FIQ? exit
+
+	ldr r10, [r12, #IRQ_SIR_FIQ_REG_OFFSET]	@ get requested interrupt number
+
+	mov r8, #2				@ reset FIQ agreement
+	str r8, [r12, #IRQ_CONTROL_REG_OFFSET]
+
+	cmp r10, #INT_GPIO_BANK1		@ is it GPIO bank interrupt?
+	beq gpio				@ yes - process it
+
+	mov r8, #1
+	orr r8, r11, r8, lsl r10		@ mask spurious interrupt
+	str r8, [r12, #IRQ_MIR_REG_OFFSET]
+exit:
+	subs	pc, lr, #4			@ return from FIQ
+	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+
+	@@@@@@@@@@@@@@@@@@@@@@@@@@@
+gpio:	@ GPIO bank interrupt handler
+	ldr r12, omap1510_gpio_base		@ set base pointer to GPIO bank
+
+	ldr r11, [r12, #OMAP1510_GPIO_INT_MASK]	@ fetch GPIO interrupts mask
+restart:
+	ldr r13, [r12, #OMAP1510_GPIO_INT_STATUS]	@ fetch status bits
+	bics r13, r13, r11			@ clear masked - any left?
+	beq exit				@ no - spurious interrupt? exit
+
+	orr r11, r11, r13			@ mask all requested interrupts
+	str r11, [r12, #OMAP1510_GPIO_INT_MASK]
+
+	ands r10, r13, #KEYBRD_CLK_MASK		@ extract keyboard status - set?
+	beq hksw				@ no - try next source
+
+
+	@@@@@@@@@@@@@@@@@@@@@@
+	@ Keyboard clock FIQ mode interrupt handler
+	@ r10 now contains KEYBRD_CLK_MASK, use it
+	str r10, [r12, #OMAP1510_GPIO_INT_STATUS]	@ ack the interrupt
+	bic r11, r11, r10				@ unmask it
+	str r11, [r12, #OMAP1510_GPIO_INT_MASK]
+
+	@ Process keyboard data
+	ldr r8, [r12, #OMAP1510_GPIO_DATA_INPUT]	@ fetch GPIO input
+
+	ldr r10, [r9, #BUF_STATE]		@ fetch kbd interface state
+	cmp r10, #0				@ are we expecting start bit?
+	bne data				@ no - go to data processing
+
+	ands r8, r8, #KEYBRD_DATA_MASK		@ check start bit - detected?
+	beq hksw				@ no - try next source
+
+	@ r8 contains KEYBRD_DATA_MASK, use it
+	str r8, [r9, #BUF_STATE]		@ enter data processing state
+	@ r10 already contains 0, reuse it
+	str r10, [r9, #BUF_KEY]			@ clear keycode
+	mov r10, #2				@ reset input bit mask
+	str r10, [r9, #BUF_MASK]
+
+	@ Mask other GPIO line interrupts till key done
+	str r11, [r9, #BUF_GPIO_INT_MASK]	@ save mask for later restore
+	mvn r11, #KEYBRD_CLK_MASK		@ prepare all except kbd mask
+	str r11, [r12, #OMAP1510_GPIO_INT_MASK]	@ store into the mask register
+
+	b restart				@ restart
+
+data:	ldr r10, [r9, #BUF_MASK]		@ fetch current input bit mask
+
+	@ r8 still contains GPIO input bits
+	ands r8, r8, #KEYBRD_DATA_MASK		@ is keyboard data line low?
+	ldreq r8, [r9, #BUF_KEY]		@ yes - fetch collected so far,
+	orreq r8, r8, r10			@ set 1 at current mask position
+	streq r8, [r9, #BUF_KEY]		@ and save back
+
+	mov r10, r10, lsl #1			@ shift mask left
+	bics r10, r10, #0x800			@ have we got all the bits?
+	strne r10, [r9, #BUF_MASK]		@ not yet - store the mask
+	bne restart				@ and restart
+
+	@ r10 already contains 0, reuse it
+	str r10, [r9, #BUF_STATE]		@ reset state to start
+
+	@ Key done - restore interrupt mask
+	ldr r10, [r9, #BUF_GPIO_INT_MASK]	@ fetch saved mask
+	and r11, r11, r10			@ unmask all saved as unmasked
+	str r11, [r12, #OMAP1510_GPIO_INT_MASK]	@ restore into the mask register
+
+	@ Try appending the keycode to the circular buffer
+	ldr r10, [r9, #BUF_KEYS_CNT]		@ get saved keystrokes count
+	ldr r8, [r9, #BUF_BUF_LEN]		@ get buffer size
+	cmp r10, r8				@ is buffer full?
+	beq hksw				@ yes - key lost, next source
+
+	add r10, r10, #1			@ increment keystrokes counter
+	str r10, [r9, #BUF_KEYS_CNT]
+
+	ldr r10, [r9, #BUF_TAIL_OFFSET]		@ get buffer tail offset
+	@ r8 already contains buffer size
+	cmp r10, r8				@ end of buffer?
+	moveq r10, #0				@ yes - rewind to buffer start
+
+	ldr r12, [r9, #BUF_BUFFER_START]	@ get buffer start address
+	add r12, r12, r10, LSL #2		@ calculate buffer tail address
+	ldr r8, [r9, #BUF_KEY]			@ get last keycode
+	str r8, [r12]				@ append it to the buffer tail
+
+	add r10, r10, #1			@ increment buffer tail offset
+	str r10, [r9, #BUF_TAIL_OFFSET]
+
+	ldr r10, [r9, #BUF_CNT_INT_KEY]		@ increment interrupts counter
+	add r10, r10, #1
+	str r10, [r9, #BUF_CNT_INT_KEY]
+	@@@@@@@@@@@@@@@@@@@@@@@@
+
+
+hksw:	@ Is hook switch interrupt requested?
+	tst r13, #HOOK_SWITCH_MASK 		@ is hook switch status bit set?
+	beq mdm					@ no - try next source
+
+
+	@@@@@@@@@@@@@@@@@@@@@@@@
+	@ Hook switch interrupt FIQ mode simple handler
+
+	@ Don't toggle active edge, the switch always bounces
+
+	@ Increment hook switch interrupt counter
+	ldr r10, [r9, #BUF_CNT_INT_HSW]
+	add r10, r10, #1
+	str r10, [r9, #BUF_CNT_INT_HSW]
+	@@@@@@@@@@@@@@@@@@@@@@@@
+
+
+mdm:	@ Is it a modem interrupt?
+	tst r13, #MODEM_IRQ_MASK 		@ is modem status bit set?
+	beq irq					@ no - check for next interrupt
+
+
+	@@@@@@@@@@@@@@@@@@@@@@@@
+	@ Modem FIQ mode interrupt handler stub
+
+	@ Increment modem interrupt counter
+	ldr r10, [r9, #BUF_CNT_INT_MDM]
+	add r10, r10, #1
+	str r10, [r9, #BUF_CNT_INT_MDM]
+	@@@@@@@@@@@@@@@@@@@@@@@@
+
+
+irq:	@ Place deferred_fiq interrupt request
+	ldr r12, deferred_fiq_ih_base		@ set pointer to IRQ handler
+	mov r10, #DEFERRED_FIQ_MASK		@ set deferred_fiq bit
+	str r10, [r12, #IRQ_ISR_REG_OFFSET] 	@ place it in the ISR register
+
+	ldr r12, omap1510_gpio_base		@ set pointer back to GPIO bank
+	b restart				@ check for next GPIO interrupt
+	@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+
+/*
+ * Virtual addresses for IO
+ */
+omap_ih1_base:
+	.word OMAP1_IO_ADDRESS(OMAP_IH1_BASE)
+deferred_fiq_ih_base:
+	.word OMAP1_IO_ADDRESS(DEFERRED_FIQ_IH_BASE)
+omap1510_gpio_base:
+	.word OMAP1_IO_ADDRESS(OMAP1510_GPIO_BASE)
+qwerty_fiqin_end:
+
+/*
+ * Check the size of the FIQ handler: it is copied to 0xffff001c
+ * and cannot extend beyond 0xffff0200.
+ */
+.if (qwerty_fiqin_end - qwerty_fiqin_start) > (0x200 - 0x1c)
+	.err
+.endif
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
new file mode 100644
index 0000000..6c994e2
--- /dev/null
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -0,0 +1,155 @@
+/*
+ *  Amstrad E3 FIQ handling
+ *
+ *  Copyright (C) 2009 Janusz Krzysztofik
+ *  Copyright (c) 2006 Matt Callow
+ *  Copyright (c) 2004 Amstrad Plc
+ *  Copyright (C) 2001 RidgeRun, Inc.
+ *
+ * Parts of this code are taken from linux/arch/arm/mach-omap/irq.c
+ * in the MontaVista 2.4 kernel (and the Amstrad changes therein)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/io.h>
+
+#include <plat/board-ams-delta.h>
+
+#include <asm/fiq.h>
+#include <mach/ams-delta-fiq.h>
+
+static struct fiq_handler fh = {
+	.name	= "ams-delta-fiq"
+};
+
+/*
+ * This buffer is shared between FIQ and IRQ contexts.
+ * The FIQ and IRQ isrs can both read and write it.
+ * It is structured as a header section of several 32-bit slots,
+ * followed by the circular buffer where the FIQ isr stores
+ * keystrokes received from the qwerty keyboard.
+ * See ams-delta-fiq.h for details of offsets.
+ */
+unsigned int fiq_buffer[1024];
+EXPORT_SYMBOL(fiq_buffer);
+
+static unsigned int irq_counter[16];
+
+static irqreturn_t deferred_fiq(int irq, void *dev_id)
+{
+	struct irq_desc *irq_desc;
+	struct irq_chip *irq_chip = NULL;
+	int gpio, irq_num, fiq_count;
+
+	irq_desc = irq_to_desc(IH_GPIO_BASE);
+	if (irq_desc)
+		irq_chip = irq_desc->chip;
+
+	/*
+	 * For each handled GPIO interrupt, keep calling its interrupt handler
+	 * until the IRQ counter catches the FIQ incremented interrupt counter.
+	 */
+	for (gpio = AMS_DELTA_GPIO_PIN_KEYBRD_CLK;
+			gpio <= AMS_DELTA_GPIO_PIN_HOOK_SWITCH; gpio++) {
+		irq_num = gpio_to_irq(gpio);
+		fiq_count = fiq_buffer[FIQ_CNT_INT_00 + gpio];
+
+		while (irq_counter[gpio] < fiq_count) {
+			if (gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) {
+				/*
+				 * It looks like handle_edge_irq(), which
+				 * OMAP GPIO edge interrupts default to,
+				 * expects the interrupt to be unmasked already.
+				 */
+				if (irq_chip && irq_chip->unmask)
+					irq_chip->unmask(irq_num);
+			}
+			generic_handle_irq(irq_num);
+
+			irq_counter[gpio]++;
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+void __init ams_delta_init_fiq(void)
+{
+	void *fiqhandler_start;
+	unsigned int fiqhandler_length;
+	struct pt_regs FIQ_regs;
+	unsigned long val, offset;
+	int i, retval;
+
+	fiqhandler_start = &qwerty_fiqin_start;
+	fiqhandler_length = &qwerty_fiqin_end - &qwerty_fiqin_start;
+	pr_info("Installing fiq handler from %p, length 0x%x\n",
+			fiqhandler_start, fiqhandler_length);
+
+	retval = claim_fiq(&fh);
+	if (retval) {
+		pr_err("ams_delta_init_fiq(): couldn't claim FIQ, ret=%d\n",
+				retval);
+		return;
+	}
+
+	retval = request_irq(INT_DEFERRED_FIQ, deferred_fiq,
+			IRQ_TYPE_EDGE_RISING, "deferred_fiq", 0);
+	if (retval < 0) {
+		pr_err("Failed to get deferred_fiq IRQ, ret=%d\n", retval);
+		release_fiq(&fh);
+		return;
+	}
+	/*
+	 * Since no set_type() method is provided by OMAP irq chip,
+	 * switch to edge triggered interrupt type manually.
+	 */
+	offset = IRQ_ILR0_REG_OFFSET + INT_DEFERRED_FIQ * 0x4;
+	val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1);
+	omap_writel(val, DEFERRED_FIQ_IH_BASE + offset);
+
+	set_fiq_handler(fiqhandler_start, fiqhandler_length);
+
+	/*
+	 * Initialise the buffer which is shared
+	 * between FIQ mode and IRQ mode
+	 */
+	fiq_buffer[FIQ_GPIO_INT_MASK]	= 0;
+	fiq_buffer[FIQ_MASK]		= 0;
+	fiq_buffer[FIQ_STATE]		= 0;
+	fiq_buffer[FIQ_KEY]		= 0;
+	fiq_buffer[FIQ_KEYS_CNT]	= 0;
+	fiq_buffer[FIQ_KEYS_HICNT]	= 0;
+	fiq_buffer[FIQ_TAIL_OFFSET]	= 0;
+	fiq_buffer[FIQ_HEAD_OFFSET]	= 0;
+	fiq_buffer[FIQ_BUF_LEN]		= 256;
+	fiq_buffer[FIQ_MISSED_KEYS]	= 0;
+	fiq_buffer[FIQ_BUFFER_START]	=
+			(unsigned int) &fiq_buffer[FIQ_CIRC_BUFF];
+
+	for (i = FIQ_CNT_INT_00; i <= FIQ_CNT_INT_15; i++)
+		fiq_buffer[i] = 0;
+
+	/*
+	 * FIQ mode r9 always points to the fiq_buffer, because the FIQ isr
+	 * will run in an unpredictable context. The fiq_buffer is the FIQ isr's
+	 * only means of communication with the IRQ level and other kernel
+	 * context code.
+	 */
+	FIQ_regs.ARM_r9 = (unsigned int)fiq_buffer;
+	set_fiq_regs(&FIQ_regs);
+
+	pr_info("request_fiq(): fiq_buffer = %p\n", fiq_buffer);
+
+	/*
+	 * Redirect GPIO interrupts to FIQ
+	 */
+	offset = IRQ_ILR0_REG_OFFSET + INT_GPIO_BANK1 * 0x4;
+	val = omap_readl(OMAP_IH1_BASE + offset) | 1;
+	omap_writel(val, OMAP_IH1_BASE + offset);
+}
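
A rough sketch of how an IRQ-side consumer, for instance a keyboard/serio driver, might drain keycodes that the FIQ handler queues into the shared buffer, using the offsets from ams-delta-fiq.h; serialization against the FIQ and the actual input reporting are left out, and drain_keystrokes() is a placeholder name rather than anything from this series:

#include <mach/ams-delta-fiq.h>

extern unsigned int fiq_buffer[];

static void drain_keystrokes(void)
{
	unsigned int head = fiq_buffer[FIQ_HEAD_OFFSET];

	while (fiq_buffer[FIQ_KEYS_CNT] > 0) {
		unsigned int key = fiq_buffer[FIQ_CIRC_BUFF + head];

		if (++head >= fiq_buffer[FIQ_BUF_LEN])
			head = 0;		/* wrap at end of circular buffer */

		fiq_buffer[FIQ_HEAD_OFFSET] = head;
		fiq_buffer[FIQ_KEYS_CNT]--;	/* slot consumed */

		/* hand 'key' to the input layer here */
		(void)key;
	}
}
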
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 7fc11c3..fdd1dd5 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -33,6 +33,8 @@
 #include <plat/board.h>
 #include <plat/common.h>
 
+#include <mach/ams-delta-fiq.h>
+
 static u8 ams_delta_latch1_reg;
 static u16 ams_delta_latch2_reg;
 
@@ -236,6 +238,10 @@
 	omap_usb_init(&ams_delta_usb_config);
 	platform_add_devices(ams_delta_devices, ARRAY_SIZE(ams_delta_devices));
 
+#ifdef CONFIG_AMS_DELTA_FIQ
+	ams_delta_init_fiq();
+#endif
+
 	omap_writew(omap_readw(ARM_RSTCT1) | 0x0004, ARM_RSTCT1);
 }
 
@@ -263,8 +269,18 @@
 
 static int __init ams_delta_modem_init(void)
 {
+	int err;
+
 	omap_cfg_reg(M14_1510_GPIO2);
-	ams_delta_modem_ports[0].irq = gpio_to_irq(2);
+	ams_delta_modem_ports[0].irq =
+			gpio_to_irq(AMS_DELTA_GPIO_PIN_MODEM_IRQ);
+
+	err = gpio_request(AMS_DELTA_GPIO_PIN_MODEM_IRQ, "modem");
+	if (err) {
+		pr_err("Couldn't request gpio pin for modem\n");
+		return err;
+	}
+	gpio_direction_input(AMS_DELTA_GPIO_PIN_MODEM_IRQ);
 
 	ams_delta_latch2_write(
 		AMS_DELTA_LATCH2_MODEM_NRESET | AMS_DELTA_LATCH2_MODEM_CODEC,
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
index e0aec10..6bbb1b8 100644
--- a/arch/arm/mach-omap1/clock.c
+++ b/arch/arm/mach-omap1/clock.c
@@ -578,7 +578,7 @@
 
 #ifdef CONFIG_OMAP_RESET_CLOCKS
 
-void __init omap1_clk_disable_unused(struct clk *clk)
+void omap1_clk_disable_unused(struct clk *clk)
 {
 	__u32 regval32;
 
diff --git a/arch/arm/mach-omap1/clock.h b/arch/arm/mach-omap1/clock.h
index a4190af..75d0d7d 100644
--- a/arch/arm/mach-omap1/clock.h
+++ b/arch/arm/mach-omap1/clock.h
@@ -39,7 +39,7 @@
 extern unsigned long omap1_watchdog_recalc(struct clk *clk);
 
 #ifdef CONFIG_OMAP_RESET_CLOCKS
-extern void __init omap1_clk_disable_unused(struct clk *clk);
+extern void omap1_clk_disable_unused(struct clk *clk);
 #else
 #define omap1_clk_disable_unused	NULL
 #endif
diff --git a/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h b/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
new file mode 100644
index 0000000..7a2df29
--- /dev/null
+++ b/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
@@ -0,0 +1,79 @@
+/*
+ * arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
+ *
+ * Taken from the original Amstrad modifications to fiq.h
+ *
+ * Copyright (c) 2004 Amstrad Plc
+ * Copyright (c) 2006 Matt Callow
+ * Copyright (c) 2010 Janusz Krzysztofik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __AMS_DELTA_FIQ_H
+#define __AMS_DELTA_FIQ_H
+
+#include <plat/irqs.h>
+
+/*
+ * Interrupt number used for passing control from FIQ to IRQ.
+ * IRQ12, described as reserved, has been selected.
+ */
+#define INT_DEFERRED_FIQ	INT_1510_RES12
+/*
+ * Base address of the interrupt handler (IH1 or IH2) that INT_DEFERRED_FIQ
+ * belongs to.
+ */
+#if (INT_DEFERRED_FIQ < IH2_BASE)
+#define DEFERRED_FIQ_IH_BASE	OMAP_IH1_BASE
+#else
+#define DEFERRED_FIQ_IH_BASE	OMAP_IH2_BASE
+#endif
+
+/*
+ * These are the offsets from the beginning of the fiq_buffer. They are put here
+ * since the buffer and header need to be accessed by drivers servicing devices
+ * which generate GPIO interrupts - e.g. keyboard, modem, hook switch.
+ */
+#define FIQ_MASK		 0
+#define FIQ_STATE		 1
+#define FIQ_KEYS_CNT		 2
+#define FIQ_TAIL_OFFSET		 3
+#define FIQ_HEAD_OFFSET		 4
+#define FIQ_BUF_LEN		 5
+#define FIQ_KEY			 6
+#define FIQ_MISSED_KEYS		 7
+#define FIQ_BUFFER_START	 8
+#define FIQ_GPIO_INT_MASK	 9
+#define FIQ_KEYS_HICNT		10
+#define FIQ_IRQ_PEND		11
+#define FIQ_SIR_CODE_L1		12
+#define IRQ_SIR_CODE_L2		13
+
+#define FIQ_CNT_INT_00		14
+#define FIQ_CNT_INT_KEY		15
+#define FIQ_CNT_INT_MDM		16
+#define FIQ_CNT_INT_03		17
+#define FIQ_CNT_INT_HSW		18
+#define FIQ_CNT_INT_05		19
+#define FIQ_CNT_INT_06		20
+#define FIQ_CNT_INT_07		21
+#define FIQ_CNT_INT_08		22
+#define FIQ_CNT_INT_09		23
+#define FIQ_CNT_INT_10		24
+#define FIQ_CNT_INT_11		25
+#define FIQ_CNT_INT_12		26
+#define FIQ_CNT_INT_13		27
+#define FIQ_CNT_INT_14		28
+#define FIQ_CNT_INT_15		29
+
+#define FIQ_CIRC_BUFF		30      /* Start of circular buffer */
+
+#ifndef __ASSEMBLER__
+extern unsigned int fiq_buffer[];
+extern unsigned char qwerty_fiqin_start, qwerty_fiqin_end;
+
+extern void __init ams_delta_init_fiq(void);
+#endif
+
+#endif
diff --git a/arch/arm/mach-omap1/include/mach/debug-macro.S b/arch/arm/mach-omap1/include/mach/debug-macro.S
index b6d9584..e8a8cf3 100644
--- a/arch/arm/mach-omap1/include/mach/debug-macro.S
+++ b/arch/arm/mach-omap1/include/mach/debug-macro.S
@@ -13,6 +13,8 @@
 
 #include <linux/serial_reg.h>
 
+#include <asm/memory.h>
+
 #include <plat/serial.h>
 
 		.pushsection .data
@@ -37,23 +39,12 @@
 		cmp	\rx, #0			@ is port configured?
 		bne	99f			@ already configured
 
-		/* Check 7XX UART1 scratchpad register for uart to use */
+		/* Check the debug UART configuration set in uncompress.h */
 		mrc	p15, 0, \rx, c1, c0
 		tst	\rx, #1			@ MMU enabled?
-		moveq	\rx, #0xff000000	@ physical base address
-		movne	\rx, #0xfe000000	@ virtual base
-		orr	\rx, \rx, #0x00fb0000	@ OMAP1UART1
-		ldrb	\rx, [\rx, #(UART_SCR << OMAP7XX_PORT_SHIFT)]
-		cmp	\rx, #0			@ anything in 7XX scratchpad?
-		bne	10f			@ found 7XX uart
-
-		/* Check 15xx/16xx UART1 scratchpad register for uart to use */
-		mrc	p15, 0, \rx, c1, c0
-		tst	\rx, #1			@ MMU enabled?
-		moveq	\rx, #0xff000000	@ physical base address
-		movne	\rx, #0xfe000000	@ virtual base
-		orr	\rx, \rx, #0x00fb0000	@ OMAP1UART1
-		ldrb	\rx, [\rx, #(UART_SCR << OMAP_PORT_SHIFT)]
+		ldreq	\rx, =OMAP_UART_INFO
+		ldrne	\rx, =__phys_to_virt(OMAP_UART_INFO)
+		ldr	\rx, [\rx, #0]
 
 		/* Select the UART to use based on the UART1 scratchpad value */
 10:		cmp	\rx, #0			@ no port configured?
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 2455dcc..b31b6f1 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -10,6 +10,7 @@
 config ARCH_OMAP2430
 	bool "OMAP2430 support"
 	depends on ARCH_OMAP2
+	select ARCH_OMAP_OTG
 
 config ARCH_OMAP3430
 	bool "OMAP3430 support"
@@ -141,6 +142,12 @@
 	depends on ARCH_OMAP3
 	select OMAP_PACKAGE_CBB
 
+config MACH_SBC3530
+	bool "OMAP3 SBC STALKER board"
+	depends on ARCH_OMAP3
+	select OMAP_PACKAGE_CUS
+	select OMAP_MUX
+
 config MACH_OMAP_3630SDP
 	bool "OMAP3630 SDP board"
 	depends on ARCH_OMAP3
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 4b9fc57..ea52b03 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -22,7 +22,7 @@
 # SMP support ONLY available for OMAP4
 obj-$(CONFIG_SMP)			+= omap-smp.o omap-headsmp.o
 obj-$(CONFIG_LOCAL_TIMERS)		+= timer-mpu.o
-obj-$(CONFIG_ARCH_OMAP4)		+= omap44xx-smc.o
+obj-$(CONFIG_ARCH_OMAP4)		+= omap44xx-smc.o omap4-common.o
 
 AFLAGS_omap44xx-smc.o			:=-Wa,-march=armv7-a
 
@@ -89,10 +89,7 @@
 obj-$(CONFIG_OMAP_MBOX_FWK)		+= mailbox_mach.o
 mailbox_mach-objs			:= mailbox.o
 
-iommu-y					+= iommu2.o
-iommu-$(CONFIG_ARCH_OMAP3)		+= omap3-iommu.o
-
-obj-$(CONFIG_OMAP_IOMMU)		+= $(iommu-y)
+obj-$(CONFIG_OMAP_IOMMU)		:= iommu2.o omap-iommu.o
 
 i2c-omap-$(CONFIG_I2C_OMAP)		:= i2c.o
 obj-y					+= $(i2c-omap-m) $(i2c-omap-y)
@@ -122,6 +119,7 @@
 obj-$(CONFIG_MACH_NOKIA_RX51)		+= board-rx51.o \
 					   board-rx51-sdram.o \
 					   board-rx51-peripherals.o \
+					   board-rx51-video.o \
 					   hsmmc.o
 obj-$(CONFIG_MACH_OMAP_ZOOM2)		+= board-zoom2.o \
 					   board-zoom-peripherals.o \
@@ -140,10 +138,13 @@
 					   hsmmc.o
 obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK)	+= board-omap3touchbook.o \
 					   hsmmc.o
-obj-$(CONFIG_MACH_OMAP_4430SDP)		+= board-4430sdp.o
+obj-$(CONFIG_MACH_OMAP_4430SDP)		+= board-4430sdp.o \
+					   hsmmc.o
 
 obj-$(CONFIG_MACH_OMAP3517EVM)		+= board-am3517evm.o
 
+obj-$(CONFIG_MACH_SBC3530)		+= board-omap3stalker.o \
+					   hsmmc.o
 # Platform specific device init code
 obj-y					+= usb-musb.o
 obj-$(CONFIG_MACH_OMAP2_TUSB6010)	+= usb-tusb6010.o
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index 01d113f..a11a575 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -174,9 +174,18 @@
 	},
 };
 
+static struct i2c_board_info __initdata sdp2430_i2c1_boardinfo[] = {
+	{
+		I2C_BOARD_INFO("isp1301_omap", 0x2D),
+		.flags = I2C_CLIENT_WAKE,
+		.irq = OMAP_GPIO_IRQ(78),
+	},
+};
+
 static int __init omap2430_i2c_init(void)
 {
-	omap_register_i2c_bus(1, 400, NULL, 0);
+	omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo,
+			ARRAY_SIZE(sdp2430_i2c1_boardinfo));
 	omap_register_i2c_bus(2, 2600, sdp2430_i2c_boardinfo,
 			ARRAY_SIZE(sdp2430_i2c_boardinfo));
 	return 0;
@@ -198,6 +207,15 @@
 	.mode			= MUSB_OTG,
 	.power			= 100,
 };
+static struct omap_usb_config sdp2430_usb_config __initdata = {
+	.otg		= 1,
+#ifdef  CONFIG_USB_GADGET_OMAP
+	.hmc_mode	= 0x0,
+#elif   defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+	.hmc_mode	= 0x1,
+#endif
+	.pins[0]	= 3,
+};
 
 static void __init omap_2430sdp_init(void)
 {
@@ -208,6 +226,7 @@
 	platform_add_devices(sdp2430_devices, ARRAY_SIZE(sdp2430_devices));
 	omap_serial_init();
 	omap2_hsmmc_init(mmc);
+	omap_usb_init(&sdp2430_usb_config);
 	usb_musb_init(&musb_board_data);
 	board_smc91x_init();
 
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 5822bcf..e7d629b 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -150,6 +150,7 @@
 static struct ads7846_platform_data tsc2046_config __initdata = {
 	.get_pendown_state	= ads7846_get_pendown_state,
 	.keep_vref_on		= 1,
+	.wakeup				= true,
 };
 
 
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index b88f28c..e4a5d66 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -18,8 +18,12 @@
 #include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/usb/otg.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c/twl.h>
+#include <linux/regulator/machine.h>
 
 #include <mach/hardware.h>
+#include <mach/omap4-common.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -29,8 +33,77 @@
 #include <plat/control.h>
 #include <plat/timer-gp.h>
 #include <plat/usb.h>
-#include <asm/hardware/gic.h>
-#include <asm/hardware/cache-l2x0.h>
+#include <plat/mmc.h>
+#include "hsmmc.h"
+
+#define ETH_KS8851_IRQ			34
+#define ETH_KS8851_POWER_ON		48
+#define ETH_KS8851_QUART		138
+
+static struct spi_board_info sdp4430_spi_board_info[] __initdata = {
+	{
+		.modalias               = "ks8851",
+		.bus_num                = 1,
+		.chip_select            = 0,
+		.max_speed_hz           = 24000000,
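+		/* GPIO number; replaced with gpio_to_irq() in omap_4430sdp_init() */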
+		.irq                    = ETH_KS8851_IRQ,
+	},
+};
+
+static int omap_ethernet_init(void)
+{
+	int status;
+
+	/* Request of GPIO lines */
+
+	status = gpio_request(ETH_KS8851_POWER_ON, "eth_power");
+	if (status) {
+		pr_err("Cannot request GPIO %d\n", ETH_KS8851_POWER_ON);
+		return status;
+	}
+
+	status = gpio_request(ETH_KS8851_QUART, "quart");
+	if (status) {
+		pr_err("Cannot request GPIO %d\n", ETH_KS8851_QUART);
+		goto error1;
+	}
+
+	status = gpio_request(ETH_KS8851_IRQ, "eth_irq");
+	if (status) {
+		pr_err("Cannot request GPIO %d\n", ETH_KS8851_IRQ);
+		goto error2;
+	}
+
+	/* Configuration of requested GPIO lines */
+
+	status = gpio_direction_output(ETH_KS8851_POWER_ON, 1);
+	if (status) {
+		pr_err("Cannot set output GPIO %d\n", ETH_KS8851_POWER_ON);
+		goto error3;
+	}
+
+	status = gpio_direction_output(ETH_KS8851_QUART, 1);
+	if (status) {
+		pr_err("Cannot set output GPIO %d\n", ETH_KS8851_QUART);
+		goto error3;
+	}
+
+	status = gpio_direction_input(ETH_KS8851_IRQ);
+	if (status) {
+		pr_err("Cannot set input GPIO %d\n", ETH_KS8851_IRQ);
+		goto error3;
+	}
+
+	return 0;
+
+error3:
+	gpio_free(ETH_KS8851_IRQ);
+error2:
+	gpio_free(ETH_KS8851_QUART);
+error1:
+	gpio_free(ETH_KS8851_POWER_ON);
+	return status;
+}
 
 static struct platform_device sdp4430_lcd_device = {
 	.name		= "sdp4430_lcd",
@@ -49,50 +122,6 @@
 	{ OMAP_TAG_LCD,		&sdp4430_lcd_config },
 };
 
-#ifdef CONFIG_CACHE_L2X0
-static int __init omap_l2_cache_init(void)
-{
-	extern void omap_smc1(u32 fn, u32 arg);
-	void __iomem *l2cache_base;
-
-	/* To avoid code running on other OMAPs in
-	 * multi-omap builds
-	 */
-	if (!cpu_is_omap44xx())
-		return -ENODEV;
-
-	/* Static mapping, never released */
-	l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
-	BUG_ON(!l2cache_base);
-
-	/* Enable PL310 L2 Cache controller */
-	omap_smc1(0x102, 0x1);
-
-	/* 32KB way size, 16-way associativity,
-	* parity disabled
-	*/
-	l2x0_init(l2cache_base, 0x0e050000, 0xc0000fff);
-
-	return 0;
-}
-early_initcall(omap_l2_cache_init);
-#endif
-
-static void __init gic_init_irq(void)
-{
-	void __iomem *base;
-
-	/* Static mapping, never released */
-	base = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
-	BUG_ON(!base);
-	gic_dist_init(0, base, 29);
-
-	/* Static mapping, never released */
-	gic_cpu_base_addr = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
-	BUG_ON(!gic_cpu_base_addr);
-	gic_cpu_init(0, gic_cpu_base_addr);
-}
-
 static void __init omap_4430sdp_init_irq(void)
 {
 	omap_board_config = sdp4430_config;
@@ -111,15 +140,254 @@
 	.power			= 100,
 };
 
+static struct omap2_hsmmc_info mmc[] = {
+	{
+		.mmc		= 1,
+		.wires		= 8,
+		.gpio_wp	= -EINVAL,
+	},
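+	/* presumably the on-board eMMC: 8-bit, non-removable, no CD/WP GPIOs */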
+	{
+		.mmc		= 2,
+		.wires		= 8,
+		.gpio_cd	= -EINVAL,
+		.gpio_wp	= -EINVAL,
+		.nonremovable   = true,
+	},
+	{}	/* Terminator */
+};
+
+static struct regulator_consumer_supply sdp4430_vmmc_supply[] = {
+	{
+		.supply = "vmmc",
+		.dev_name = "mmci-omap-hs.0",
+	},
+	{
+		.supply = "vmmc",
+		.dev_name = "mmci-omap-hs.1",
+	},
+};
+
+static int omap4_twl6030_hsmmc_late_init(struct device *dev)
+{
+	int ret = 0;
+	struct platform_device *pdev = container_of(dev,
+				struct platform_device, dev);
+	struct omap_mmc_platform_data *pdata = dev->platform_data;
+
+	/* Setting MMC1 Card detect Irq */
+	if (pdev->id == 0)
+		pdata->slots[0].card_detect_irq = TWL6030_IRQ_BASE +
+						MMCDETECT_INTR_OFFSET;
+	return ret;
+}
+
+static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev)
+{
+	struct omap_mmc_platform_data *pdata = dev->platform_data;
+
+	pdata->init =	omap4_twl6030_hsmmc_late_init;
+}
+
+static int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
+{
+	struct omap2_hsmmc_info *c;
+
+	omap2_hsmmc_init(controllers);
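+	/* omap2_hsmmc_init() presumably fills in c->dev for the late_init hook below */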
+	for (c = controllers; c->mmc; c++)
+		omap4_twl6030_hsmmc_set_late_init(c->dev);
+
+	return 0;
+}
+
+static struct regulator_init_data sdp4430_vaux1 = {
+	.constraints = {
+		.min_uV			= 1000000,
+		.max_uV			= 3000000,
+		.apply_uV		= true,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask	 = REGULATOR_CHANGE_VOLTAGE
+					| REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+};
+
+static struct regulator_init_data sdp4430_vaux2 = {
+	.constraints = {
+		.min_uV			= 1200000,
+		.max_uV			= 2800000,
+		.apply_uV		= true,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask	 = REGULATOR_CHANGE_VOLTAGE
+					| REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+};
+
+static struct regulator_init_data sdp4430_vaux3 = {
+	.constraints = {
+		.min_uV			= 1000000,
+		.max_uV			= 3000000,
+		.apply_uV		= true,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask	 = REGULATOR_CHANGE_VOLTAGE
+					| REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+};
+
+/* VMMC1 for MMC1 card */
+static struct regulator_init_data sdp4430_vmmc = {
+	.constraints = {
+		.min_uV			= 1200000,
+		.max_uV			= 3000000,
+		.apply_uV		= true,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask	 = REGULATOR_CHANGE_VOLTAGE
+					| REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies  = 2,
+	.consumer_supplies      = sdp4430_vmmc_supply,
+};
+
+static struct regulator_init_data sdp4430_vpp = {
+	.constraints = {
+		.min_uV			= 1800000,
+		.max_uV			= 2500000,
+		.apply_uV		= true,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask	 = REGULATOR_CHANGE_VOLTAGE
+					| REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+};
+
+static struct regulator_init_data sdp4430_vusim = {
+	.constraints = {
+		.min_uV			= 1200000,
+		.max_uV			= 2900000,
+		.apply_uV		= true,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask	 = REGULATOR_CHANGE_VOLTAGE
+					| REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+};
+
+static struct regulator_init_data sdp4430_vana = {
+	.constraints = {
+		.min_uV			= 2100000,
+		.max_uV			= 2100000,
+		.apply_uV		= true,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask	 = REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+};
+
+static struct regulator_init_data sdp4430_vcxio = {
+	.constraints = {
+		.min_uV			= 1800000,
+		.max_uV			= 1800000,
+		.apply_uV		= true,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask	 = REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+};
+
+static struct regulator_init_data sdp4430_vdac = {
+	.constraints = {
+		.min_uV			= 1800000,
+		.max_uV			= 1800000,
+		.apply_uV		= true,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask	 = REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+};
+
+static struct regulator_init_data sdp4430_vusb = {
+	.constraints = {
+		.min_uV			= 3300000,
+		.max_uV			= 3300000,
+		.apply_uV		= true,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask	 =	REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+};
+
+static struct twl4030_platform_data sdp4430_twldata = {
+	.irq_base	= TWL6030_IRQ_BASE,
+	.irq_end	= TWL6030_IRQ_END,
+
+	/* Regulators */
+	.vmmc		= &sdp4430_vmmc,
+	.vpp		= &sdp4430_vpp,
+	.vusim		= &sdp4430_vusim,
+	.vana		= &sdp4430_vana,
+	.vcxio		= &sdp4430_vcxio,
+	.vdac		= &sdp4430_vdac,
+	.vusb		= &sdp4430_vusb,
+	.vaux1		= &sdp4430_vaux1,
+	.vaux2		= &sdp4430_vaux2,
+	.vaux3		= &sdp4430_vaux3,
+};
+
+static struct i2c_board_info __initdata sdp4430_i2c_boardinfo[] = {
+	{
+		I2C_BOARD_INFO("twl6030", 0x48),
+		.flags = I2C_CLIENT_WAKE,
+		.irq = OMAP44XX_IRQ_SYS_1N,
+		.platform_data = &sdp4430_twldata,
+	},
+};
+
+static int __init omap4_i2c_init(void)
+{
+	/*
+	 * The Phoenix audio IC needs I2C1 to
+	 * start at 400 kHz or less.
+	 */
+	omap_register_i2c_bus(1, 400, sdp4430_i2c_boardinfo,
+			ARRAY_SIZE(sdp4430_i2c_boardinfo));
+	omap_register_i2c_bus(2, 400, NULL, 0);
+	omap_register_i2c_bus(3, 400, NULL, 0);
+	omap_register_i2c_bus(4, 400, NULL, 0);
+	return 0;
+}
 static void __init omap_4430sdp_init(void)
 {
+	int status;
+
+	omap4_i2c_init();
 	platform_add_devices(sdp4430_devices, ARRAY_SIZE(sdp4430_devices));
 	omap_serial_init();
+	omap4_twl6030_hsmmc_init(mmc);
 	/* OMAP4 SDP uses internal transceiver so register nop transceiver */
 	usb_nop_xceiv_register();
 	/* FIXME: allow multi-omap to boot until musb is updated for omap4 */
 	if (!cpu_is_omap44xx())
 		usb_musb_init(&musb_board_data);
+
+	status = omap_ethernet_init();
+	if (status) {
+		pr_err("Ethernet initialization failed: %d\n", status);
+	} else {
+		sdp4430_spi_board_info[0].irq = gpio_to_irq(ETH_KS8851_IRQ);
+		spi_register_board_info(sdp4430_spi_board_info,
+				ARRAY_SIZE(sdp4430_spi_board_info));
+	}
 }
 
 static void __init omap_4430sdp_map_io(void)
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index c1c4389..af383a8 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -21,6 +21,8 @@
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
 #include <linux/i2c/pca953x.h>
+#include <linux/can/platform/ti_hecc.h>
+#include <linux/davinci_emac.h>
 
 #include <mach/hardware.h>
 #include <mach/am35xx.h>
@@ -30,16 +32,111 @@
 
 #include <plat/board.h>
 #include <plat/common.h>
+#include <plat/control.h>
 #include <plat/usb.h>
 #include <plat/display.h>
 
 #include "mux.h"
 
+#define AM35XX_EVM_PHY_MASK		(0xF)
+#define AM35XX_EVM_MDIO_FREQUENCY	(1000000)
+
+static struct emac_platform_data am3517_evm_emac_pdata = {
+	.phy_mask	= AM35XX_EVM_PHY_MASK,
+	.mdio_max_freq	= AM35XX_EVM_MDIO_FREQUENCY,
+	.rmii_en	= 1,
+};
+
+static struct resource am3517_emac_resources[] = {
+	{
+		.start  = AM35XX_IPSS_EMAC_BASE,
+		.end    = AM35XX_IPSS_EMAC_BASE + 0x3FFFF,
+		.flags  = IORESOURCE_MEM,
+	},
+	{
+		.start  = INT_35XX_EMAC_C0_RXTHRESH_IRQ,
+		.end    = INT_35XX_EMAC_C0_RXTHRESH_IRQ,
+		.flags  = IORESOURCE_IRQ,
+	},
+	{
+		.start  = INT_35XX_EMAC_C0_RX_PULSE_IRQ,
+		.end    = INT_35XX_EMAC_C0_RX_PULSE_IRQ,
+		.flags  = IORESOURCE_IRQ,
+	},
+	{
+		.start  = INT_35XX_EMAC_C0_TX_PULSE_IRQ,
+		.end    = INT_35XX_EMAC_C0_TX_PULSE_IRQ,
+		.flags  = IORESOURCE_IRQ,
+	},
+	{
+		.start  = INT_35XX_EMAC_C0_MISC_PULSE_IRQ,
+		.end    = INT_35XX_EMAC_C0_MISC_PULSE_IRQ,
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device am3517_emac_device = {
+	.name		= "davinci_emac",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(am3517_emac_resources),
+	.resource	= am3517_emac_resources,
+};
+
+static void am3517_enable_ethernet_int(void)
+{
+	u32 regval;
+
+	regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+	regval = (regval | AM35XX_CPGMAC_C0_RX_PULSE_CLR |
+		AM35XX_CPGMAC_C0_TX_PULSE_CLR |
+		AM35XX_CPGMAC_C0_MISC_PULSE_CLR |
+		AM35XX_CPGMAC_C0_RX_THRESH_CLR);
+	omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR);
+	regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+}
+
+static void am3517_disable_ethernet_int(void)
+{
+	u32 regval;
+
+	regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+	regval = (regval | AM35XX_CPGMAC_C0_RX_PULSE_CLR |
+		AM35XX_CPGMAC_C0_TX_PULSE_CLR);
+	omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR);
+	regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+}
+
+void am3517_evm_ethernet_init(struct emac_platform_data *pdata)
+{
+	unsigned int regval;
+
+	pdata->ctrl_reg_offset		= AM35XX_EMAC_CNTRL_OFFSET;
+	pdata->ctrl_mod_reg_offset	= AM35XX_EMAC_CNTRL_MOD_OFFSET;
+	pdata->ctrl_ram_offset		= AM35XX_EMAC_CNTRL_RAM_OFFSET;
+	pdata->mdio_reg_offset		= AM35XX_EMAC_MDIO_OFFSET;
+	pdata->ctrl_ram_size		= AM35XX_EMAC_CNTRL_RAM_SIZE;
+	pdata->version			= EMAC_VERSION_2;
+	pdata->hw_ram_addr		= AM35XX_EMAC_HW_RAM_ADDR;
+	pdata->interrupt_enable		= am3517_enable_ethernet_int;
+	pdata->interrupt_disable	= am3517_disable_ethernet_int;
+	am3517_emac_device.dev.platform_data	= pdata;
+	platform_device_register(&am3517_emac_device);
+
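+	/* release the CPGMAC from reset; read back presumably flushes the write */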
+	regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
+	regval = regval & (~(AM35XX_CPGMACSS_SW_RST));
+	omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
+	regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
+
+	return;
+}
+
 #define LCD_PANEL_PWR		176
 #define LCD_PANEL_BKLIGHT_PWR	182
 #define LCD_PANEL_PWM		181
 
-static struct i2c_board_info __initdata am3517evm_i2c_boardinfo[] = {
+static struct i2c_board_info __initdata am3517evm_i2c1_boardinfo[] = {
 	{
 		I2C_BOARD_INFO("s35390a", 0x30),
 		.type		= "s35390a",
@@ -69,7 +166,7 @@
 		gpio_free(GPIO_RTCS35390A_IRQ);
 		return;
 	}
-	am3517evm_i2c_boardinfo[0].irq = gpio_to_irq(GPIO_RTCS35390A_IRQ);
+	am3517evm_i2c1_boardinfo[0].irq = gpio_to_irq(GPIO_RTCS35390A_IRQ);
 }
 
 /*
@@ -80,7 +177,7 @@
 static struct pca953x_platform_data am3517evm_gpio_expander_info_0 = {
 	.gpio_base	= OMAP_MAX_GPIO_LINES,
 };
-static struct i2c_board_info __initdata am3517evm_tca6416_info_0[] = {
+static struct i2c_board_info __initdata am3517evm_i2c2_boardinfo[] = {
 	{
 		I2C_BOARD_INFO("tca6416", 0x21),
 		.platform_data = &am3517evm_gpio_expander_info_0,
@@ -94,7 +191,7 @@
 static struct pca953x_platform_data am3517evm_ui_gpio_expander_info_2 = {
 	.gpio_base	= OMAP_MAX_GPIO_LINES + 32,
 };
-static struct i2c_board_info __initdata am3517evm_ui_tca6416_info[] = {
+static struct i2c_board_info __initdata am3517evm_i2c3_boardinfo[] = {
 	{
 		I2C_BOARD_INFO("tca6416", 0x20),
 		.platform_data = &am3517evm_ui_gpio_expander_info_1,
@@ -108,10 +205,10 @@
 static int __init am3517_evm_i2c_init(void)
 {
 	omap_register_i2c_bus(1, 400, NULL, 0);
-	omap_register_i2c_bus(2, 400, am3517evm_tca6416_info_0,
-			ARRAY_SIZE(am3517evm_tca6416_info_0));
-	omap_register_i2c_bus(3, 400, am3517evm_ui_tca6416_info,
-			ARRAY_SIZE(am3517evm_ui_tca6416_info));
+	omap_register_i2c_bus(2, 400, am3517evm_i2c2_boardinfo,
+			ARRAY_SIZE(am3517evm_i2c2_boardinfo));
+	omap_register_i2c_bus(3, 400, am3517evm_i2c3_boardinfo,
+			ARRAY_SIZE(am3517evm_i2c3_boardinfo));
 
 	return 0;
 }
@@ -119,6 +216,8 @@
 static int lcd_enabled;
 static int dvi_enabled;
 
+#if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \
+		defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE)
 static void __init am3517_evm_display_init(void)
 {
 	int r;
@@ -162,6 +261,9 @@
 err_1:
 	gpio_free(LCD_PANEL_BKLIGHT_PWR);
 }
+#else
+static void __init am3517_evm_display_init(void) {}
+#endif
 
 static int am3517_evm_panel_enable_lcd(struct omap_dss_device *dssdev)
 {
@@ -275,7 +377,12 @@
 
 static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
 	.port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
+#if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \
+		defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE)
+	.port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+#else
 	.port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+#endif
 	.port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
 
 	.phy_reset  = true,
@@ -292,6 +399,42 @@
 #define board_mux	NULL
 #endif
 
+
+static struct resource am3517_hecc_resources[] = {
+	{
+		.start	= AM35XX_IPSS_HECC_BASE,
+		.end	= AM35XX_IPSS_HECC_BASE + 0x3FFF,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_35XX_HECC0_IRQ,
+		.end	= INT_35XX_HECC0_IRQ,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device am3517_hecc_device = {
+	.name		= "ti_hecc",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(am3517_hecc_resources),
+	.resource	= am3517_hecc_resources,
+};
+
+static struct ti_hecc_platform_data am3517_evm_hecc_pdata = {
+	.scc_hecc_offset	= AM35XX_HECC_SCC_HECC_OFFSET,
+	.scc_ram_offset		= AM35XX_HECC_SCC_RAM_OFFSET,
+	.hecc_ram_offset	= AM35XX_HECC_RAM_OFFSET,
+	.mbx_offset		= AM35XX_HECC_MBOX_OFFSET,
+	.int_line		= AM35XX_HECC_INT_LINE,
+	.version		= AM35XX_HECC_VERSION,
+};
+
+static void am3517_evm_hecc_init(struct ti_hecc_platform_data *pdata)
+{
+	am3517_hecc_device.dev.platform_data = pdata;
+	platform_device_register(&am3517_hecc_device);
+}
+
 static void __init am3517_evm_init(void)
 {
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
@@ -305,14 +448,17 @@
 	/* Configure GPIO for EHCI port */
 	omap_mux_init_gpio(57, OMAP_PIN_OUTPUT);
 	usb_ehci_init(&ehci_pdata);
+	am3517_evm_hecc_init(&am3517_evm_hecc_pdata);
 	/* DSS */
 	am3517_evm_display_init();
 
 	/* RTC - S35390A */
 	am3517_evm_rtc_init();
 
-	i2c_register_board_info(1, am3517evm_i2c_boardinfo,
-				ARRAY_SIZE(am3517evm_i2c_boardinfo));
+	i2c_register_board_info(1, am3517evm_i2c1_boardinfo,
+				ARRAY_SIZE(am3517evm_i2c1_boardinfo));
+	/* Ethernet */
+	am3517_evm_ethernet_init(&am3517_evm_emac_pdata);
 }
 
 static void __init am3517_evm_map_io(void)
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index 2de4f79..e679a2c 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -45,6 +45,7 @@
 #include <plat/gpmc.h>
 #include <plat/usb.h>
 #include <plat/display.h>
+#include <plat/mcspi.h>
 
 #include <mach/hardware.h>
 
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 47e3af2..77022b5 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -633,8 +633,163 @@
 	.reset_gpio_port[2]  = -EINVAL
 };
 
+static struct omap_board_mux board_mux[] __initdata = {
+	/* nCS and IRQ for Devkit8000 ethernet */
+	OMAP3_MUX(GPMC_NCS6, OMAP_MUX_MODE0),
+	OMAP3_MUX(ETK_D11, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP),
+
+	/* McSPI 2*/
+	OMAP3_MUX(MCSPI2_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(MCSPI2_SIMO, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(MCSPI2_SOMI, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(MCSPI2_CS0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(MCSPI2_CS1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+
+	/* PENDOWN GPIO */
+	OMAP3_MUX(ETK_D13, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+
+	/* mUSB */
+	OMAP3_MUX(HSUSB0_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(HSUSB0_STP, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(HSUSB0_DIR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(HSUSB0_NXT, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(HSUSB0_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(HSUSB0_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(HSUSB0_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(HSUSB0_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(HSUSB0_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(HSUSB0_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(HSUSB0_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(HSUSB0_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+	/* USB 1 */
+	OMAP3_MUX(ETK_CTL, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+	OMAP3_MUX(ETK_CLK, OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(ETK_D8, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+	OMAP3_MUX(ETK_D9, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+	OMAP3_MUX(ETK_D0, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+	OMAP3_MUX(ETK_D1, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+	OMAP3_MUX(ETK_D2, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+	OMAP3_MUX(ETK_D3, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+	OMAP3_MUX(ETK_D4, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+	OMAP3_MUX(ETK_D5, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+	OMAP3_MUX(ETK_D6, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+	OMAP3_MUX(ETK_D7, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+
+	/* MMC 1 */
+	OMAP3_MUX(SDMMC1_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(SDMMC1_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(SDMMC1_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(SDMMC1_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(SDMMC1_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(SDMMC1_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(SDMMC1_DAT4, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(SDMMC1_DAT5, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(SDMMC1_DAT6, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(SDMMC1_DAT7, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+	/* McBSP 2 */
+	OMAP3_MUX(MCBSP2_FSX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(MCBSP2_CLKX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(MCBSP2_DR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(MCBSP2_DX, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+
+	/* I2C 1 */
+	OMAP3_MUX(I2C1_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(I2C1_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+	/* I2C 2 */
+	OMAP3_MUX(I2C2_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(I2C2_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+	/* I2C 3 */
+	OMAP3_MUX(I2C3_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(I2C3_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+	/* I2C 4 */
+	OMAP3_MUX(I2C4_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(I2C4_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+	/* serial ports */
+	OMAP3_MUX(MCBSP3_CLKX, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(MCBSP3_FSX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+	OMAP3_MUX(UART1_TX, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(UART1_RX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+	/* DSS */
+	OMAP3_MUX(DSS_PCLK, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_HSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_VSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_ACBIAS, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA8, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA9, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA10, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA11, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA12, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA13, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA14, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA15, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA16, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA17, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA20, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+
+	/* expansion port */
+	/* McSPI 1 */
+	OMAP3_MUX(MCSPI1_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(MCSPI1_SIMO, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(MCSPI1_SOMI, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(MCSPI1_CS0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
+	OMAP3_MUX(MCSPI1_CS3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
+
+	/* HDQ */
+	OMAP3_MUX(HDQ_SIO, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+	/* McSPI4 */
+	OMAP3_MUX(MCBSP1_CLKR, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+	OMAP3_MUX(MCBSP1_DX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+	OMAP3_MUX(MCBSP1_DR, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+	OMAP3_MUX(MCBSP1_FSX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT_PULLUP),
+
+	/* MMC 2 */
+	OMAP3_MUX(SDMMC2_DAT4, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(SDMMC2_DAT5, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(SDMMC2_DAT6, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(SDMMC2_DAT7, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+
+	/* I2C3 */
+	OMAP3_MUX(I2C3_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+	OMAP3_MUX(I2C3_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+	OMAP3_MUX(MCBSP1_CLKX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(MCBSP1_FSR, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+
+	OMAP3_MUX(GPMC_NCS7, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+	OMAP3_MUX(GPMC_NCS3, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+
+	/* TPS IRQ */
+	OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_WAKEUP_EN | \
+			OMAP_PIN_INPUT_PULLUP),
+
+	{ .reg_offset = OMAP_MUX_TERMINATOR },
+};
+
 static void __init devkit8000_init(void)
 {
+	omap3_mux_init(board_mux, OMAP_PACKAGE_CUS);
 	omap_serial_init();
 
 	omap_dm9000_init();
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 962d377..69b154c 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -39,6 +39,7 @@
 
 #include <plat/board.h>
 #include <plat/common.h>
+#include <plat/display.h>
 #include <plat/gpmc.h>
 #include <plat/nand.h>
 #include <plat/usb.h>
@@ -106,6 +107,77 @@
 	.resource	= &omap3beagle_nand_resource,
 };
 
+/* DSS */
+
+static int beagle_enable_dvi(struct omap_dss_device *dssdev)
+{
+	if (gpio_is_valid(dssdev->reset_gpio))
+		gpio_set_value(dssdev->reset_gpio, 1);
+
+	return 0;
+}
+
+static void beagle_disable_dvi(struct omap_dss_device *dssdev)
+{
+	if (gpio_is_valid(dssdev->reset_gpio))
+		gpio_set_value(dssdev->reset_gpio, 0);
+}
+
+static struct omap_dss_device beagle_dvi_device = {
+	.type = OMAP_DISPLAY_TYPE_DPI,
+	.name = "dvi",
+	.driver_name = "generic_panel",
+	.phy.dpi.data_lines = 24,
+	.reset_gpio = 170,
+	.platform_enable = beagle_enable_dvi,
+	.platform_disable = beagle_disable_dvi,
+};
+
+static struct omap_dss_device beagle_tv_device = {
+	.name = "tv",
+	.driver_name = "venc",
+	.type = OMAP_DISPLAY_TYPE_VENC,
+	.phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+};
+
+static struct omap_dss_device *beagle_dss_devices[] = {
+	&beagle_dvi_device,
+	&beagle_tv_device,
+};
+
+static struct omap_dss_board_info beagle_dss_data = {
+	.num_devices = ARRAY_SIZE(beagle_dss_devices),
+	.devices = beagle_dss_devices,
+	.default_device = &beagle_dvi_device,
+};
+
+static struct platform_device beagle_dss_device = {
+	.name          = "omapdss",
+	.id            = -1,
+	.dev            = {
+		.platform_data = &beagle_dss_data,
+	},
+};
+
+static struct regulator_consumer_supply beagle_vdac_supply =
+	REGULATOR_SUPPLY("vdda_dac", "omapdss");
+
+static struct regulator_consumer_supply beagle_vdvi_supply =
+	REGULATOR_SUPPLY("vdds_dsi", "omapdss");
+
+static void __init beagle_display_init(void)
+{
+	int r;
+
+	r = gpio_request(beagle_dvi_device.reset_gpio, "DVI reset");
+	if (r < 0) {
+		printk(KERN_ERR "Unable to get DVI reset GPIO\n");
+		return;
+	}
+
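+	/* held low here; beagle_enable_dvi() drives it high to enable the panel */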
+	gpio_direction_output(beagle_dvi_device.reset_gpio, 0);
+}
+
 #include "sdram-micron-mt46h32m32lf-6.h"
 
 static struct omap2_hsmmc_info mmc[] = {
@@ -117,15 +189,6 @@
 	{}	/* Terminator */
 };
 
-static struct platform_device omap3_beagle_lcd_device = {
-	.name		= "omap3beagle_lcd",
-	.id		= -1,
-};
-
-static struct omap_lcd_config omap3_beagle_lcd_config __initdata = {
-	.ctrl_name	= "internal",
-};
-
 static struct regulator_consumer_supply beagle_vmmc1_supply = {
 	.supply			= "vmmc",
 };
@@ -181,16 +244,6 @@
 	.setup		= beagle_twl_gpio_setup,
 };
 
-static struct regulator_consumer_supply beagle_vdac_supply = {
-	.supply		= "vdac",
-	.dev		= &omap3_beagle_lcd_device.dev,
-};
-
-static struct regulator_consumer_supply beagle_vdvi_supply = {
-	.supply		= "vdvi",
-	.dev		= &omap3_beagle_lcd_device.dev,
-};
-
 /* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
 static struct regulator_init_data beagle_vmmc1 = {
 	.constraints = {
@@ -349,14 +402,8 @@
 	},
 };
 
-static struct omap_board_config_kernel omap3_beagle_config[] __initdata = {
-	{ OMAP_TAG_LCD,		&omap3_beagle_lcd_config },
-};
-
 static void __init omap3_beagle_init_irq(void)
 {
-	omap_board_config = omap3_beagle_config;
-	omap_board_config_size = ARRAY_SIZE(omap3_beagle_config);
 	omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
 			     mt46h32m32lf6_sdrc_params);
 	omap_init_irq();
@@ -367,9 +414,9 @@
 }
 
 static struct platform_device *omap3_beagle_devices[] __initdata = {
-	&omap3_beagle_lcd_device,
 	&leds_gpio,
 	&keys_gpio,
+	&beagle_dss_device,
 };
 
 static void __init omap3beagle_flash_init(void)
@@ -456,6 +503,8 @@
 	/* Ensure SDRC pins are mux'd for self-refresh */
 	omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
 	omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
+
+	beagle_display_init();
 }
 
 static void __init omap3_beagle_map_io(void)
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 7d7b5bc..81bba19 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -600,6 +600,7 @@
 	.get_pendown_state	= ads7846_get_pendown_state,
 	.keep_vref_on		= 1,
 	.settle_delay_usecs	= 150,
+	.wakeup				= true,
 };
 
 static struct omap2_mcspi_device_config ads7846_mcspi_config = {
@@ -651,11 +652,10 @@
 #ifdef CONFIG_OMAP_MUX
 static struct omap_board_mux board_mux[] __initdata = {
 	OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP |
-				OMAP_PIN_OFF_INPUT_PULLUP |
+				OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
 				OMAP_PIN_OFF_WAKEUPENABLE),
 	OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
-				OMAP_PIN_OFF_INPUT_PULLUP |
-				OMAP_PIN_OFF_WAKEUPENABLE),
+				OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW),
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
 #else
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
new file mode 100644
index 0000000..f848ba8
--- /dev/null
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -0,0 +1,672 @@
+/*
+ * linux/arch/arm/mach-omap2/board-omap3stalker.c
+ *
+ * Copyright (C) 2008 Guangzhou EMA-Tech
+ *
+ * Modified from mach-omap2/board-omap3evm.c
+ *
+ * Initial code: Syed Mohammed Khasim
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+
+#include <linux/regulator/machine.h>
+#include <linux/i2c/twl.h>
+
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+
+#include <plat/board.h>
+#include <plat/common.h>
+#include <plat/gpmc.h>
+#include <plat/nand.h>
+#include <plat/usb.h>
+#include <plat/timer-gp.h>
+#include <plat/display.h>
+
+#include <plat/mcspi.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/ads7846.h>
+#include <linux/interrupt.h>
+#include <linux/smsc911x.h>
+#include <linux/i2c/at24.h>
+
+#include "sdram-micron-mt46h32m32lf-6.h"
+#include "mux.h"
+#include "hsmmc.h"
+
+#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
+#define OMAP3STALKER_ETHR_START	0x2c000000
+#define OMAP3STALKER_ETHR_SIZE	1024
+#define OMAP3STALKER_ETHR_GPIO_IRQ	19
+#define OMAP3STALKER_SMC911X_CS	5
+
+static struct resource omap3stalker_smsc911x_resources[] = {
+	[0] = {
+	       .start	= OMAP3STALKER_ETHR_START,
+	       .end	=
+	       (OMAP3STALKER_ETHR_START + OMAP3STALKER_ETHR_SIZE - 1),
+	       .flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+	       .start	= OMAP_GPIO_IRQ(OMAP3STALKER_ETHR_GPIO_IRQ),
+	       .end	= OMAP_GPIO_IRQ(OMAP3STALKER_ETHR_GPIO_IRQ),
+	       .flags	= (IORESOURCE_IRQ | IRQF_TRIGGER_LOW),
+	},
+};
+
+static struct smsc911x_platform_config smsc911x_config = {
+	.phy_interface	= PHY_INTERFACE_MODE_MII,
+	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+	.flags		= (SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS),
+};
+
+static struct platform_device omap3stalker_smsc911x_device = {
+	.name		= "smsc911x",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(omap3stalker_smsc911x_resources),
+	.resource	= &omap3stalker_smsc911x_resources[0],
+	.dev		= {
+		.platform_data	= &smsc911x_config,
+	},
+};
+
+static inline void __init omap3stalker_init_eth(void)
+{
+	int eth_cs;
+	struct clk *l3ck;
+	unsigned int rate;
+
+	eth_cs = OMAP3STALKER_SMC911X_CS;
+
+	l3ck = clk_get(NULL, "l3_ck");
+	if (IS_ERR(l3ck))
+		rate = 100000000;
+	else
+		rate = clk_get_rate(l3ck);
+
+	omap_mux_init_gpio(19, OMAP_PIN_INPUT_PULLUP);
+	if (gpio_request(OMAP3STALKER_ETHR_GPIO_IRQ, "SMC911x irq") < 0) {
+		printk(KERN_ERR
+		       "Failed to request GPIO%d for smc911x IRQ\n",
+		       OMAP3STALKER_ETHR_GPIO_IRQ);
+		return;
+	}
+
+	gpio_direction_input(OMAP3STALKER_ETHR_GPIO_IRQ);
+
+	platform_device_register(&omap3stalker_smsc911x_device);
+}
+
+#else
+static inline void __init omap3stalker_init_eth(void)
+{
+	return;
+}
+#endif
+
+/*
+ * OMAP3 DSS control signals
+ */
+
+#define DSS_ENABLE_GPIO	199
+#define LCD_PANEL_BKLIGHT_GPIO	210
+#define ENABLE_VPLL2_DEV_GRP	0xE0
+
+static int lcd_enabled;
+static int dvi_enabled;
+
+static void __init omap3_stalker_display_init(void)
+{
+	return;
+}
+
+static int omap3_stalker_enable_lcd(struct omap_dss_device *dssdev)
+{
+	if (dvi_enabled) {
+		printk(KERN_ERR "cannot enable LCD, DVI is enabled\n");
+		return -EINVAL;
+	}
+	gpio_set_value(DSS_ENABLE_GPIO, 1);
+	gpio_set_value(LCD_PANEL_BKLIGHT_GPIO, 1);
+	lcd_enabled = 1;
+	return 0;
+}
+
+static void omap3_stalker_disable_lcd(struct omap_dss_device *dssdev)
+{
+	gpio_set_value(DSS_ENABLE_GPIO, 0);
+	gpio_set_value(LCD_PANEL_BKLIGHT_GPIO, 0);
+	lcd_enabled = 0;
+}
+
+static struct omap_dss_device omap3_stalker_lcd_device = {
+	.name			= "lcd",
+	.driver_name		= "generic_panel",
+	.phy.dpi.data_lines	= 24,
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.platform_enable	= omap3_stalker_enable_lcd,
+	.platform_disable	= omap3_stalker_disable_lcd,
+};
+
+static int omap3_stalker_enable_tv(struct omap_dss_device *dssdev)
+{
+	return 0;
+}
+
+static void omap3_stalker_disable_tv(struct omap_dss_device *dssdev)
+{
+}
+
+static struct omap_dss_device omap3_stalker_tv_device = {
+	.name			= "tv",
+	.driver_name		= "venc",
+	.type			= OMAP_DISPLAY_TYPE_VENC,
+#if defined(CONFIG_OMAP2_VENC_OUT_TYPE_SVIDEO)
+	.phy.venc.type		= OMAP_DSS_VENC_TYPE_SVIDEO,
+#elif defined(CONFIG_OMAP2_VENC_OUT_TYPE_COMPOSITE)
+	.phy.venc.type		= OMAP_DSS_VENC_TYPE_COMPOSITE,
+#endif
+	.platform_enable	= omap3_stalker_enable_tv,
+	.platform_disable	= omap3_stalker_disable_tv,
+};
+
+static int omap3_stalker_enable_dvi(struct omap_dss_device *dssdev)
+{
+	if (lcd_enabled) {
+		printk(KERN_ERR "cannot enable DVI, LCD is enabled\n");
+		return -EINVAL;
+	}
+	gpio_set_value(DSS_ENABLE_GPIO, 1);
+	dvi_enabled = 1;
+	return 0;
+}
+
+static void omap3_stalker_disable_dvi(struct omap_dss_device *dssdev)
+{
+	gpio_set_value(DSS_ENABLE_GPIO, 0);
+	dvi_enabled = 0;
+}
+
+static struct omap_dss_device omap3_stalker_dvi_device = {
+	.name			= "dvi",
+	.driver_name		= "generic_panel",
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.phy.dpi.data_lines	= 24,
+	.platform_enable	= omap3_stalker_enable_dvi,
+	.platform_disable	= omap3_stalker_disable_dvi,
+};
+
+static struct omap_dss_device *omap3_stalker_dss_devices[] = {
+	&omap3_stalker_lcd_device,
+	&omap3_stalker_tv_device,
+	&omap3_stalker_dvi_device,
+};
+
+static struct omap_dss_board_info omap3_stalker_dss_data = {
+	.num_devices	= ARRAY_SIZE(omap3_stalker_dss_devices),
+	.devices	= omap3_stalker_dss_devices,
+	.default_device	= &omap3_stalker_dvi_device,
+};
+
+static struct platform_device omap3_stalker_dss_device = {
+	.name	= "omapdss",
+	.id	= -1,
+	.dev	= {
+		.platform_data	= &omap3_stalker_dss_data,
+	},
+};
+
+static struct regulator_consumer_supply omap3stalker_vmmc1_supply = {
+	.supply		= "vmmc",
+};
+
+static struct regulator_consumer_supply omap3stalker_vsim_supply = {
+	.supply		= "vmmc_aux",
+};
+
+/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
+static struct regulator_init_data omap3stalker_vmmc1 = {
+	.constraints		= {
+		.min_uV			= 1850000,
+		.max_uV			= 3150000,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+		| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
+		| REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &omap3stalker_vmmc1_supply,
+};
+
+/* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
+static struct regulator_init_data omap3stalker_vsim = {
+	.constraints		= {
+		.min_uV			= 1800000,
+		.max_uV			= 3000000,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+		| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
+		| REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &omap3stalker_vsim_supply,
+};
+
+static struct omap2_hsmmc_info mmc[] = {
+	{
+	 .mmc		= 1,
+	 .wires		= 4,
+	 .gpio_cd	= -EINVAL,
+	 .gpio_wp	= 23,
+	 },
+	{}			/* Terminator */
+};
+
+static struct gpio_keys_button gpio_buttons[] = {
+	{
+	 .code		= BTN_EXTRA,
+	 .gpio		= 18,
+	 .desc		= "user",
+	 .wakeup	= 1,
+	 },
+};
+
+static struct gpio_keys_platform_data gpio_key_info = {
+	.buttons	= gpio_buttons,
+	.nbuttons	= ARRAY_SIZE(gpio_buttons),
+};
+
+static struct platform_device keys_gpio = {
+	.name		= "gpio-keys",
+	.id		= -1,
+	.dev		= {
+		.platform_data	= &gpio_key_info,
+	},
+};
+
+static struct gpio_led gpio_leds[] = {
+	{
+	 .name			= "stalker:D8:usr0",
+	 .default_trigger	= "default-on",
+	 .gpio			= 126,
+	 },
+	{
+	 .name			= "stalker:D9:usr1",
+	 .default_trigger	= "default-on",
+	 .gpio			= 127,
+	 },
+	{
+	 .name			= "stalker:D3:mmc0",
+	 .gpio			= -EINVAL,	/* gets replaced */
+	 .active_low		= true,
+	 .default_trigger	= "mmc0",
+	 },
+	{
+	 .name			= "stalker:D4:heartbeat",
+	 .gpio			= -EINVAL,	/* gets replaced */
+	 .active_low		= true,
+	 .default_trigger	= "heartbeat",
+	 },
+};
+
+static struct gpio_led_platform_data gpio_led_info = {
+	.leds		= gpio_leds,
+	.num_leds	= ARRAY_SIZE(gpio_leds),
+};
+
+static struct platform_device leds_gpio = {
+	.name	= "leds-gpio",
+	.id	= -1,
+	.dev	= {
+		.platform_data	= &gpio_led_info,
+	},
+};
+
+static int
+omap3stalker_twl_gpio_setup(struct device *dev,
+			    unsigned gpio, unsigned ngpio)
+{
+	/* gpio + 0 is "mmc0_cd" (input/IRQ) */
+	omap_mux_init_gpio(23, OMAP_PIN_INPUT);
+	mmc[0].gpio_cd = gpio + 0;
+	omap2_hsmmc_init(mmc);
+
+	/* link regulators to MMC adapters */
+	omap3stalker_vmmc1_supply.dev = mmc[0].dev;
+	omap3stalker_vsim_supply.dev = mmc[0].dev;
+
+	/*
+	 * Most GPIOs are for USB OTG.  Some are mostly sent to
+	 * the P2 connector; notably LEDA for the LCD backlight.
+	 */
+
+	/* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */
+	gpio_request(gpio + TWL4030_GPIO_MAX, "EN_LCD_BKL");
+	gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
+
+	/* gpio + 7 == DVI Enable */
+	gpio_request(gpio + 7, "EN_DVI");
+	gpio_direction_output(gpio + 7, 0);
+
+	/* TWL4030_GPIO_MAX + 1 == ledB (out, mmc0) */
+	gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
+	/* GPIO + 13 == ledsync (out, heartbeat) */
+	gpio_leds[3].gpio = gpio + 13;
+
+	platform_device_register(&leds_gpio);
+	return 0;
+}
+
+static struct twl4030_gpio_platform_data omap3stalker_gpio_data = {
+	.gpio_base	= OMAP_MAX_GPIO_LINES,
+	.irq_base	= TWL4030_GPIO_IRQ_BASE,
+	.irq_end	= TWL4030_GPIO_IRQ_END,
+	.use_leds	= true,
+	.setup		= omap3stalker_twl_gpio_setup,
+};
+
+static struct twl4030_usb_data omap3stalker_usb_data = {
+	.usb_mode	= T2_USB_MODE_ULPI,
+};
+
+static int board_keymap[] = {
+	KEY(0, 0, KEY_LEFT),
+	KEY(0, 1, KEY_DOWN),
+	KEY(0, 2, KEY_ENTER),
+	KEY(0, 3, KEY_M),
+
+	KEY(1, 0, KEY_RIGHT),
+	KEY(1, 1, KEY_UP),
+	KEY(1, 2, KEY_I),
+	KEY(1, 3, KEY_N),
+
+	KEY(2, 0, KEY_A),
+	KEY(2, 1, KEY_E),
+	KEY(2, 2, KEY_J),
+	KEY(2, 3, KEY_O),
+
+	KEY(3, 0, KEY_B),
+	KEY(3, 1, KEY_F),
+	KEY(3, 2, KEY_K),
+	KEY(3, 3, KEY_P)
+};
+
+static struct matrix_keymap_data board_map_data = {
+	.keymap		= board_keymap,
+	.keymap_size	= ARRAY_SIZE(board_keymap),
+};
+
+static struct twl4030_keypad_data omap3stalker_kp_data = {
+	.keymap_data	= &board_map_data,
+	.rows		= 4,
+	.cols		= 4,
+	.rep		= 1,
+};
+
+static struct twl4030_madc_platform_data omap3stalker_madc_data = {
+	.irq_line	= 1,
+};
+
+static struct twl4030_codec_audio_data omap3stalker_audio_data = {
+	.audio_mclk	= 26000000,
+};
+
+static struct twl4030_codec_data omap3stalker_codec_data = {
+	.audio_mclk	= 26000000,
+	.audio		= &omap3stalker_audio_data,
+};
+
+static struct regulator_consumer_supply omap3_stalker_vdda_dac_supply = {
+	.supply		= "vdda_dac",
+	.dev		= &omap3_stalker_dss_device.dev,
+};
+
+/* VDAC for DSS driving S-Video */
+static struct regulator_init_data omap3_stalker_vdac = {
+	.constraints		= {
+		.min_uV			= 1800000,
+		.max_uV			= 1800000,
+		.apply_uV		= true,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+		| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask		= REGULATOR_CHANGE_MODE
+		| REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &omap3_stalker_vdda_dac_supply,
+};
+
+/* VPLL2 for digital video outputs */
+static struct regulator_consumer_supply omap3_stalker_vpll2_supply = {
+	.supply		= "vdds_dsi",
+	.dev		= &omap3_stalker_lcd_device.dev,
+};
+
+static struct regulator_init_data omap3_stalker_vpll2 = {
+	.constraints		= {
+		.name			= "VDVI",
+		.min_uV			= 1800000,
+		.max_uV			= 1800000,
+		.apply_uV = true,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+		| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask		= REGULATOR_CHANGE_MODE
+		| REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &omap3_stalker_vpll2_supply,
+};
+
+static struct twl4030_platform_data omap3stalker_twldata = {
+	.irq_base	= TWL4030_IRQ_BASE,
+	.irq_end	= TWL4030_IRQ_END,
+
+	/* platform_data for children goes here */
+	.keypad		= &omap3stalker_kp_data,
+	.madc		= &omap3stalker_madc_data,
+	.usb		= &omap3stalker_usb_data,
+	.gpio		= &omap3stalker_gpio_data,
+	.codec		= &omap3stalker_codec_data,
+	.vdac		= &omap3_stalker_vdac,
+	.vpll2		= &omap3_stalker_vpll2,
+};
+
+static struct i2c_board_info __initdata omap3stalker_i2c_boardinfo[] = {
+	{
+	 I2C_BOARD_INFO("twl4030", 0x48),
+	 .flags		= I2C_CLIENT_WAKE,
+	 .irq		= INT_34XX_SYS_NIRQ,
+	 .platform_data	= &omap3stalker_twldata,
+	 },
+};
+
+static struct at24_platform_data fram_info = {
+	.byte_len	= (64 * 1024) / 8,
+	.page_size	= 8192,
+	.flags		= AT24_FLAG_ADDR16 | AT24_FLAG_IRUGO,
+};
+
+static struct i2c_board_info __initdata omap3stalker_i2c_boardinfo3[] = {
+	{
+	 I2C_BOARD_INFO("24c64", 0x50),
+	 .flags		= I2C_CLIENT_WAKE,
+	 .platform_data	= &fram_info,
+	 },
+};
+
+static int __init omap3_stalker_i2c_init(void)
+{
+	/*
+	 * REVISIT: These entries can be set in omap3evm_twl_data
+	 * after a merge with MFD tree
+	 */
+	omap3stalker_twldata.vmmc1 = &omap3stalker_vmmc1;
+	omap3stalker_twldata.vsim = &omap3stalker_vsim;
+
+	omap_register_i2c_bus(1, 2600, omap3stalker_i2c_boardinfo,
+			      ARRAY_SIZE(omap3stalker_i2c_boardinfo));
+	omap_register_i2c_bus(2, 400, NULL, 0);
+	omap_register_i2c_bus(3, 400, omap3stalker_i2c_boardinfo3,
+			      ARRAY_SIZE(omap3stalker_i2c_boardinfo3));
+	return 0;
+}
+
+#define OMAP3_STALKER_TS_GPIO	175
+static void ads7846_dev_init(void)
+{
+	if (gpio_request(OMAP3_STALKER_TS_GPIO, "ADS7846 pendown") < 0)
+		printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
+
+	gpio_direction_input(OMAP3_STALKER_TS_GPIO);
+
+	omap_set_gpio_debounce(OMAP3_STALKER_TS_GPIO, 1);
+	omap_set_gpio_debounce_time(OMAP3_STALKER_TS_GPIO, 0xa);
+}
+
+static int ads7846_get_pendown_state(void)
+{
+	return !gpio_get_value(OMAP3_STALKER_TS_GPIO);
+}
+
+static struct ads7846_platform_data ads7846_config = {
+	.x_max			= 0x0fff,
+	.y_max			= 0x0fff,
+	.x_plate_ohms		= 180,
+	.pressure_max		= 255,
+	.debounce_max		= 10,
+	.debounce_tol		= 3,
+	.debounce_rep		= 1,
+	.get_pendown_state	= ads7846_get_pendown_state,
+	.keep_vref_on		= 1,
+	.settle_delay_usecs	= 150,
+};
+
+static struct omap2_mcspi_device_config ads7846_mcspi_config = {
+	.turbo_mode		= 0,
+	.single_channel		= 1,	/* 0: slave, 1: master */
+};
+
+struct spi_board_info omap3stalker_spi_board_info[] = {
+	[0] = {
+	       .modalias	= "ads7846",
+	       .bus_num		= 1,
+	       .chip_select	= 0,
+	       .max_speed_hz	= 1500000,
+	       .controller_data	= &ads7846_mcspi_config,
+	       .irq		= OMAP_GPIO_IRQ(OMAP3_STALKER_TS_GPIO),
+	       .platform_data	= &ads7846_config,
+	},
+};
+
+static struct omap_board_config_kernel omap3_stalker_config[] __initdata = {
+};
+
+static void __init omap3_stalker_init_irq(void)
+{
+	omap_board_config = omap3_stalker_config;
+	omap_board_config_size = ARRAY_SIZE(omap3_stalker_config);
+	omap2_init_common_hw(mt46h32m32lf6_sdrc_params, NULL);
+	omap_init_irq();
+#ifdef CONFIG_OMAP_32K_TIMER
+	omap2_gp_clockevent_set_gptimer(12);
+#endif
+	omap_gpio_init();
+}
+
+static struct platform_device *omap3_stalker_devices[] __initdata = {
+	&omap3_stalker_dss_device,
+	&keys_gpio,
+};
+
+static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
+	.port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+	.port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+	.port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+
+	.phy_reset = true,
+	.reset_gpio_port[0] = -EINVAL,
+	.reset_gpio_port[1] = 21,
+	.reset_gpio_port[2] = -EINVAL,
+};
+
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+	OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP |
+		  OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE),
+	OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
+		  OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE),
+	{.reg_offset = OMAP_MUX_TERMINATOR},
+};
+#else
+#define board_mux	NULL
+#endif
+
+static struct omap_musb_board_data musb_board_data = {
+	.interface_type	= MUSB_INTERFACE_ULPI,
+	.mode		= MUSB_OTG,
+	.power		= 100,
+};
+
+static void __init omap3_stalker_init(void)
+{
+	omap3_mux_init(board_mux, OMAP_PACKAGE_CUS);
+
+	omap3_stalker_i2c_init();
+
+	platform_add_devices(omap3_stalker_devices,
+			     ARRAY_SIZE(omap3_stalker_devices));
+
+	spi_register_board_info(omap3stalker_spi_board_info,
+				ARRAY_SIZE(omap3stalker_spi_board_info));
+
+	omap_serial_init();
+	usb_musb_init(&musb_board_data);
+	usb_ehci_init(&ehci_pdata);
+	ads7846_dev_init();
+
+	omap_mux_init_gpio(21, OMAP_PIN_OUTPUT);
+	omap_mux_init_gpio(18, OMAP_PIN_INPUT_PULLUP);
+
+	omap3stalker_init_eth();
+	omap3_stalker_display_init();
+	/* Ensure SDRC pins are mux'd for self-refresh */
+	omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
+	omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
+}
+
+static void __init omap3_stalker_map_io(void)
+{
+	omap2_set_globals_343x();
+	omap34xx_map_common_io();
+}
+
+MACHINE_START(SBC3530, "OMAP3 STALKER")
+	/* Maintainer: Jason Lam -lzg@ema-tech.com */
+	.phys_io		= 0x48000000,
+	.io_pg_offst		= ((0xfa000000) >> 18) & 0xfffc,
+	.boot_params		= 0x80000100,
+	.map_io			= omap3_stalker_map_io,
+	.init_irq		= omap3_stalker_init_irq,
+	.init_machine		= omap3_stalker_init,
+	.timer			= &omap_timer,
+MACHINE_END
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 8848c7c..79ac414 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -63,6 +63,8 @@
 
 #define OVERO_SMSC911X_CS      5
 #define OVERO_SMSC911X_GPIO    176
+#define OVERO_SMSC911X2_CS     4
+#define OVERO_SMSC911X2_GPIO   65
 
 #if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
 	defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
@@ -137,6 +139,16 @@
 	},
 };
 
+static struct resource overo_smsc911x2_resources[] = {
+	{
+		.name	= "smsc911x2-memory",
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
+	},
+};
+
 static struct smsc911x_platform_config overo_smsc911x_config = {
 	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
 	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
@@ -146,7 +158,7 @@
 
 static struct platform_device overo_smsc911x_device = {
 	.name		= "smsc911x",
-	.id		= -1,
+	.id		= 0,
 	.num_resources	= ARRAY_SIZE(overo_smsc911x_resources),
 	.resource	= overo_smsc911x_resources,
 	.dev		= {
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 4377a4c..abdf321 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -45,6 +45,8 @@
 /* list all spi devices here */
 enum {
 	RX51_SPI_WL1251,
+	RX51_SPI_MIPID,		/* LCD panel */
+	RX51_SPI_TSC2005,	/* Touch Controller */
 };
 
 static struct wl12xx_platform_data wl1251_pdata;
@@ -54,6 +56,16 @@
 	.single_channel	= 1,
 };
 
+static struct omap2_mcspi_device_config mipid_mcspi_config = {
+	.turbo_mode	= 0,
+	.single_channel	= 1,
+};
+
+static struct omap2_mcspi_device_config tsc2005_mcspi_config = {
+	.turbo_mode	= 0,
+	.single_channel	= 1,
+};
+
 static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
 	[RX51_SPI_WL1251] = {
 		.modalias		= "wl1251",
@@ -64,6 +76,22 @@
 		.controller_data	= &wl1251_mcspi_config,
 		.platform_data		= &wl1251_pdata,
 	},
+	[RX51_SPI_MIPID] = {
+		.modalias		= "acx565akm",
+		.bus_num		= 1,
+		.chip_select		= 2,
+		.max_speed_hz		= 6000000,
+		.controller_data	= &mipid_mcspi_config,
+	},
+	[RX51_SPI_TSC2005] = {
+		.modalias		= "tsc2005",
+		.bus_num		= 1,
+		.chip_select		= 0,
+		/* .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO),*/
+		.max_speed_hz		= 6000000,
+		.controller_data	= &tsc2005_mcspi_config,
+		/* .platform_data = &tsc2005_config,*/
+	},
 };
 
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
@@ -277,7 +305,7 @@
 	.dev_name = "mmci-omap-hs.0",
 };
 
-static struct regulator_consumer_supply rx51_vmmc2_supply = {
+static struct regulator_consumer_supply rx51_vaux3_supply = {
 	.supply   = "vmmc",
 	.dev_name = "mmci-omap-hs.1",
 };
@@ -287,6 +315,48 @@
 	.dev_name = "mmci-omap-hs.1",
 };
 
+static struct regulator_consumer_supply rx51_vmmc2_supplies[] = {
+	/* tlv320aic3x analog supplies */
+	{
+		.supply		= "AVDD",
+		.dev_name	= "2-0018",
+	},
+	{
+		.supply		= "DRVDD",
+		.dev_name	= "2-0018",
+	},
+	/* Keep vmmc as the last item; newer boards do not iterate over it */
+	{
+		.supply		= "vmmc",
+		.dev_name	= "mmci-omap-hs.1",
+	},
+};
+
+static struct regulator_consumer_supply rx51_vio_supplies[] = {
+	/* tlv320aic3x digital supplies */
+	{
+		.supply		= "IOVDD",
+		.dev_name	= "2-0018"
+	},
+	{
+		.supply		= "DVDD",
+		.dev_name	= "2-0018"
+	},
+};
+
+#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+extern struct platform_device rx51_display_device;
+#endif
+
+static struct regulator_consumer_supply rx51_vaux1_consumers[] = {
+#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+	{
+		.supply	= "vdds_sdi",
+		.dev	= &rx51_display_device.dev,
+	},
+#endif
+};
+
 static struct regulator_init_data rx51_vaux1 = {
 	.constraints = {
 		.name			= "V28",
@@ -297,6 +367,8 @@
 		.valid_ops_mask		= REGULATOR_CHANGE_MODE
 					| REGULATOR_CHANGE_STATUS,
 	},
+	.num_consumer_supplies	= ARRAY_SIZE(rx51_vaux1_consumers),
+	.consumer_supplies	= rx51_vaux1_consumers,
 };
 
 static struct regulator_init_data rx51_vaux2 = {
@@ -338,7 +410,7 @@
 					| REGULATOR_CHANGE_STATUS,
 	},
 	.num_consumer_supplies	= 1,
-	.consumer_supplies	= &rx51_vmmc2_supply,
+	.consumer_supplies	= &rx51_vaux3_supply,
 };
 
 static struct regulator_init_data rx51_vaux4 = {
@@ -370,9 +442,9 @@
 
 static struct regulator_init_data rx51_vmmc2 = {
 	.constraints = {
-		.name			= "VMMC2_30",
-		.min_uV			= 1850000,
-		.max_uV			= 3150000,
+		.name			= "V28_A",
+		.min_uV			= 2800000,
+		.max_uV			= 3000000,
 		.apply_uV		= true,
 		.valid_modes_mask	= REGULATOR_MODE_NORMAL
 					| REGULATOR_MODE_STANDBY,
@@ -380,8 +452,8 @@
 					| REGULATOR_CHANGE_MODE
 					| REGULATOR_CHANGE_STATUS,
 	},
-	.num_consumer_supplies	= 1,
-	.consumer_supplies	= &rx51_vmmc2_supply,
+	.num_consumer_supplies	= ARRAY_SIZE(rx51_vmmc2_supplies),
+	.consumer_supplies	= rx51_vmmc2_supplies,
 };
 
 static struct regulator_init_data rx51_vsim = {
@@ -411,6 +483,20 @@
 	},
 };
 
+static struct regulator_init_data rx51_vio = {
+	.constraints = {
+		.min_uV			= 1800000,
+		.max_uV			= 1800000,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
+					| REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= ARRAY_SIZE(rx51_vio_supplies),
+	.consumer_supplies	= rx51_vio_supplies,
+};
+
 static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n)
 {
 	/* FIXME this gpio setup is just a placeholder for now */
@@ -618,6 +704,7 @@
 	.vmmc1			= &rx51_vmmc1,
 	.vsim			= &rx51_vsim,
 	.vdac			= &rx51_vdac,
+	.vio			= &rx51_vio,
 };
 
 static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_1[] = {
@@ -629,18 +716,27 @@
 	},
 };
 
+static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_2[] = {
+	{
+		I2C_BOARD_INFO("tlv320aic3x", 0x18),
+	},
+};
+
 static int __init rx51_i2c_init(void)
 {
 	if ((system_rev >= SYSTEM_REV_S_USES_VAUX3 && system_rev < 0x100) ||
-	    system_rev >= SYSTEM_REV_B_USES_VAUX3)
+	    system_rev >= SYSTEM_REV_B_USES_VAUX3) {
 		rx51_twldata.vaux3 = &rx51_vaux3_mmc;
-	else {
+		/* Only older boards use VMMC2 for internal MMC */
+		rx51_vmmc2.num_consumer_supplies--;
+	} else {
 		rx51_twldata.vaux3 = &rx51_vaux3_cam;
-		rx51_twldata.vmmc2 = &rx51_vmmc2;
 	}
+	rx51_twldata.vmmc2 = &rx51_vmmc2;
 	omap_register_i2c_bus(1, 2200, rx51_peripherals_i2c_board_info_1,
-			ARRAY_SIZE(rx51_peripherals_i2c_board_info_1));
-	omap_register_i2c_bus(2, 100, NULL, 0);
+			      ARRAY_SIZE(rx51_peripherals_i2c_board_info_1));
+	omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
+			      ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
 	omap_register_i2c_bus(3, 400, NULL, 0);
 	return 0;
 }
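
Note on the regulator change above: rx51_vmmc2_supplies[] deliberately keeps the "vmmc" consumer as the last array entry, so rx51_i2c_init() can drop it on newer boards simply by decrementing num_consumer_supplies while the codec supplies stay attached. A minimal standalone sketch of that "trim the tail by shrinking the count" idiom follows; the structure and names below are invented stand-ins for illustration, not the kernel's regulator types.

#include <stdio.h>

struct consumer {
	const char *supply;
	const char *dev_name;
};

/* Invented stand-ins for the rx51 regulator tables; only the idiom matters. */
static struct consumer supplies[] = {
	{ "AVDD",  "2-0018" },
	{ "DRVDD", "2-0018" },
	/* Keep the optional consumer last so it can be dropped by count alone. */
	{ "vmmc",  "mmci-omap-hs.1" },
};

static unsigned int num_supplies = sizeof(supplies) / sizeof(supplies[0]);

int main(void)
{
	int board_uses_vaux3_for_mmc = 1;	/* hypothetical board-revision check */
	unsigned int i;

	if (board_uses_vaux3_for_mmc)
		num_supplies--;			/* newer boards: vmmc comes from VAUX3 instead */

	for (i = 0; i < num_supplies; i++)
		printf("%s -> %s\n", supplies[i].supply, supplies[i].dev_name);
	return 0;
}
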
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
new file mode 100644
index 0000000..b743a4f
--- /dev/null
+++ b/arch/arm/mach-omap2/board-rx51-video.c
@@ -0,0 +1,109 @@
+/*
+ * linux/arch/arm/mach-omap2/board-rx51-video.c
+ *
+ * Copyright (C) 2010 Nokia
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/mm.h>
+
+#include <asm/mach-types.h>
+#include <plat/mux.h>
+#include <plat/display.h>
+#include <plat/vram.h>
+#include <plat/mcspi.h>
+
+#include "mux.h"
+
+#define RX51_LCD_RESET_GPIO	90
+
+#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+
+static int rx51_lcd_enable(struct omap_dss_device *dssdev)
+{
+	gpio_set_value(dssdev->reset_gpio, 1);
+	return 0;
+}
+
+static void rx51_lcd_disable(struct omap_dss_device *dssdev)
+{
+	gpio_set_value(dssdev->reset_gpio, 0);
+}
+
+static struct omap_dss_device rx51_lcd_device = {
+	.name			= "lcd",
+	.driver_name		= "panel-acx565akm",
+	.type			= OMAP_DISPLAY_TYPE_SDI,
+	.phy.sdi.datapairs	= 2,
+	.reset_gpio		= RX51_LCD_RESET_GPIO,
+	.platform_enable	= rx51_lcd_enable,
+	.platform_disable	= rx51_lcd_disable,
+};
+
+static struct omap_dss_device *rx51_dss_devices[] = {
+	&rx51_lcd_device,
+};
+
+static struct omap_dss_board_info rx51_dss_board_info = {
+	.num_devices	= ARRAY_SIZE(rx51_dss_devices),
+	.devices	= rx51_dss_devices,
+	.default_device	= &rx51_lcd_device,
+};
+
+struct platform_device rx51_display_device = {
+	.name	= "omapdss",
+	.id	= -1,
+	.dev	= {
+		.platform_data = &rx51_dss_board_info,
+	},
+};
+
+static struct platform_device *rx51_video_devices[] __initdata = {
+	&rx51_display_device,
+};
+
+static int __init rx51_video_init(void)
+{
+	if (!machine_is_nokia_rx51())
+		return 0;
+
+	if (omap_mux_init_gpio(RX51_LCD_RESET_GPIO, OMAP_PIN_OUTPUT)) {
+		pr_err("%s cannot configure MUX for LCD RESET\n", __func__);
+		return 0;
+	}
+
+	if (gpio_request(RX51_LCD_RESET_GPIO, "LCD ACX565AKM reset")) {
+		pr_err("%s failed to get LCD Reset GPIO\n", __func__);
+		return 0;
+	}
+
+	gpio_direction_output(RX51_LCD_RESET_GPIO, 1);
+
+	platform_add_devices(rx51_video_devices,
+				ARRAY_SIZE(rx51_video_devices));
+	return 0;
+}
+
+subsys_initcall(rx51_video_init);
+
+void __init rx51_video_mem_init(void)
+{
+	/*
+	 * GFX 864x480x32bpp
+	 * VID1/2 1280x720x32bpp double buffered
+	 */
+	omap_vram_set_sdram_vram(PAGE_ALIGN(864 * 480 * 4) +
+			2 * PAGE_ALIGN(1280 * 720 * 4 * 2), 0);
+}
+
+#else
+void __init rx51_video_mem_init(void) { }
+#endif /* defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE) */
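
The VRAM reservation made by rx51_video_mem_init() above works out as follows (assuming the usual 4 KiB PAGE_SIZE, under which both terms happen to be page-aligned already): the GFX framebuffer is 864 x 480 x 4 bytes = 1,658,880 bytes (~1.6 MiB); each of VID1/VID2, double buffered, is 1280 x 720 x 4 x 2 = 7,372,800 bytes (~7.0 MiB) and is counted twice; the total SDRAM carved out for VRAM is therefore 1,658,880 + 2 x 7,372,800 = 16,404,480 bytes, roughly 15.6 MiB.
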
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index b155c36..1b86b5b 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -36,6 +36,7 @@
 #define RX51_GPIO_SLEEP_IND 162
 
 struct omap_sdrc_params *rx51_get_sdram_timings(void);
+extern void rx51_video_mem_init(void);
 
 static struct gpio_led gpio_leds[] = {
 	{
@@ -143,6 +144,7 @@
 static void __init rx51_map_io(void)
 {
 	omap2_set_globals_343x();
+	rx51_video_mem_init();
 	omap34xx_map_common_io();
 }
 
diff --git a/arch/arm/mach-omap2/board-zoom-debugboard.c b/arch/arm/mach-omap2/board-zoom-debugboard.c
index e15d2e8..1d7f827 100644
--- a/arch/arm/mach-omap2/board-zoom-debugboard.c
+++ b/arch/arm/mach-omap2/board-zoom-debugboard.c
@@ -82,7 +82,7 @@
 
 static struct plat_serial8250_port serial_platform_data[] = {
 	{
-		.mapbase	= 0x10000000,
+		.mapbase	= ZOOM_UART_BASE,
 		.irq		= OMAP_GPIO_IRQ(102),
 		.flags		= UPF_BOOT_AUTOCONF|UPF_IOREMAP|UPF_SHARE_IRQ,
 		.irqflags	= IRQF_SHARED | IRQF_TRIGGER_RISING,
diff --git a/arch/arm/mach-omap2/board-zoom2.c b/arch/arm/mach-omap2/board-zoom2.c
index 9a26f84..803ef14 100644
--- a/arch/arm/mach-omap2/board-zoom2.c
+++ b/arch/arm/mach-omap2/board-zoom2.c
@@ -91,8 +91,8 @@
 }
 
 MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
-	.phys_io	= 0x48000000,
-	.io_pg_offst	= ((0xfa000000) >> 18) & 0xfffc,
+	.phys_io	= ZOOM_UART_BASE,
+	.io_pg_offst	= (ZOOM_UART_VIRT >> 18) & 0xfffc,
 	.boot_params	= 0x80000100,
 	.map_io		= omap_zoom2_map_io,
 	.init_irq	= omap_zoom2_init_irq,
diff --git a/arch/arm/mach-omap2/board-zoom3.c b/arch/arm/mach-omap2/board-zoom3.c
index cd3e40c..3314704 100644
--- a/arch/arm/mach-omap2/board-zoom3.c
+++ b/arch/arm/mach-omap2/board-zoom3.c
@@ -73,8 +73,8 @@
 }
 
 MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
-	.phys_io	= 0x48000000,
-	.io_pg_offst	= ((0xfa000000) >> 18) & 0xfffc,
+	.phys_io	= ZOOM_UART_BASE,
+	.io_pg_offst	= (ZOOM_UART_VIRT >> 18) & 0xfffc,
 	.boot_params	= 0x80000100,
 	.map_io		= omap_zoom_map_io,
 	.init_irq	= omap_zoom_init_irq,
diff --git a/arch/arm/mach-omap2/clkt2xxx_apll.c b/arch/arm/mach-omap2/clkt2xxx_apll.c
index 43d7246..66e01ac 100644
--- a/arch/arm/mach-omap2/clkt2xxx_apll.c
+++ b/arch/arm/mach-omap2/clkt2xxx_apll.c
@@ -70,12 +70,12 @@
 
 static int omap2_clk_apll96_enable(struct clk *clk)
 {
-	return omap2_clk_apll_enable(clk, OMAP24XX_ST_96M_APLL);
+	return omap2_clk_apll_enable(clk, OMAP24XX_ST_96M_APLL_MASK);
 }
 
 static int omap2_clk_apll54_enable(struct clk *clk)
 {
-	return omap2_clk_apll_enable(clk, OMAP24XX_ST_54M_APLL);
+	return omap2_clk_apll_enable(clk, OMAP24XX_ST_54M_APLL_MASK);
 }
 
 /* Stop APLL */
diff --git a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
index e60ca4e..aef6291 100644
--- a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
+++ b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
@@ -68,16 +68,13 @@
 {
 	const struct prcm_config *ptr;
 	long highest_rate;
-	long sys_ck_rate;
-
-	sys_ck_rate = clk_get_rate(sclk);
 
 	highest_rate = -EINVAL;
 
 	for (ptr = rate_table; ptr->mpu_speed; ptr++) {
 		if (!(ptr->flags & cpu_mask))
 			continue;
-		if (ptr->xtal_speed != sys_ck_rate)
+		if (ptr->xtal_speed != sclk->rate)
 			continue;
 
 		highest_rate = ptr->mpu_speed;
@@ -96,15 +93,12 @@
 	const struct prcm_config *prcm;
 	unsigned long found_speed = 0;
 	unsigned long flags;
-	long sys_ck_rate;
-
-	sys_ck_rate = clk_get_rate(sclk);
 
 	for (prcm = rate_table; prcm->mpu_speed; prcm++) {
 		if (!(prcm->flags & cpu_mask))
 			continue;
 
-		if (prcm->xtal_speed != sys_ck_rate)
+		if (prcm->xtal_speed != sclk->rate)
 			continue;
 
 		if (prcm->mpu_speed <= rate) {
@@ -181,19 +175,16 @@
 void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
 {
 	const struct prcm_config *prcm;
-	long sys_ck_rate;
 	int i = 0;
 	int tbl_sz = 0;
 
 	if (!cpu_is_omap24xx())
 		return;
 
-	sys_ck_rate = clk_get_rate(sclk);
-
 	for (prcm = rate_table; prcm->mpu_speed; prcm++) {
 		if (!(prcm->flags & cpu_mask))
 			continue;
-		if (prcm->xtal_speed != sys_ck_rate)
+		if (prcm->xtal_speed != sclk->rate)
 			continue;
 
 		/* don't put bypass rates in table */
@@ -226,7 +217,7 @@
 	for (prcm = rate_table; prcm->mpu_speed; prcm++) {
 		if (!(prcm->flags & cpu_mask))
 			continue;
-		if (prcm->xtal_speed != sys_ck_rate)
+		if (prcm->xtal_speed != sclk->rate)
 			continue;
 
 		/* don't put bypass rates in table */
diff --git a/arch/arm/mach-omap2/clkt_clksel.c b/arch/arm/mach-omap2/clkt_clksel.c
index e50812d..a781cd6 100644
--- a/arch/arm/mach-omap2/clkt_clksel.c
+++ b/arch/arm/mach-omap2/clkt_clksel.c
@@ -12,8 +12,26 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * XXX At some point these clksel clocks should be split into
- * "divider" clocks and "mux" clocks to better match the hardware.
+ *
+ * clksel clocks are clocks that do not have a fixed parent, or that
+ * can divide their parent's rate, or possibly both at the same time, based
+ * on the contents of a hardware register bitfield.
+ *
+ * All of the various mux and divider settings can be encoded into
+ * struct clksel* data structures, and then these can be autogenerated
+ * from some hardware database for each new chip generation.  This
+ * should avoid the need to write, review, and validate a lot of new
+ * clock code for each new chip, since it can be exported from the SoC
+ * design flow.  This is now done on OMAP4.
+ *
+ * The fusion of mux and divider clocks is a software creation.  In
+ * hardware reality, the multiplexer (parent selection) and the
+ * divider exist separately.  XXX At some point these clksel clocks
+ * should be split into "divider" clocks and "mux" clocks to better
+ * match the hardware.
+ *
+ * (The name "clksel" comes from the name of the corresponding
+ * register field in the OMAP2/3 family of SoCs.)
  *
  * XXX Currently these clocks are only used in the OMAP2/3/4 code, but
  * many of the OMAP1 clocks should be convertible to use this
@@ -29,14 +47,11 @@
 #include <plat/clock.h>
 
 #include "clock.h"
-#include "cm.h"
-#include "cm-regbits-24xx.h"
-#include "cm-regbits-34xx.h"
 
 /* Private functions */
 
 /**
- * _omap2_get_clksel_by_parent - return clksel struct for a given clk & parent
+ * _get_clksel_by_parent() - return clksel struct for a given clk & parent
  * @clk: OMAP struct clk ptr to inspect
  * @src_clk: OMAP struct clk ptr of the parent clk to search for
  *
@@ -44,141 +59,217 @@
  * the element associated with the supplied parent clock address.
  * Returns a pointer to the struct clksel on success or NULL on error.
  */
-static const struct clksel *_omap2_get_clksel_by_parent(struct clk *clk,
-							struct clk *src_clk)
+static const struct clksel *_get_clksel_by_parent(struct clk *clk,
+						  struct clk *src_clk)
 {
 	const struct clksel *clks;
 
-	if (!clk->clksel)
-		return NULL;
-
-	for (clks = clk->clksel; clks->parent; clks++) {
+	for (clks = clk->clksel; clks->parent; clks++)
 		if (clks->parent == src_clk)
 			break; /* Found the requested parent */
-	}
 
 	if (!clks->parent) {
-		printk(KERN_ERR "clock: Could not find parent clock %s in "
-		       "clksel array of clock %s\n", src_clk->name,
-		       clk->name);
+		/* This indicates a data problem */
+		WARN(1, "clock: Could not find parent clock %s in clksel array "
+		     "of clock %s\n", src_clk->name, clk->name);
 		return NULL;
 	}
 
 	return clks;
 }
 
-/*
- * Converts encoded control register address into a full address
- * On error, the return value (parent_div) will be 0.
+/**
+ * _get_div_and_fieldval() - find the new clksel divisor and field value to use
+ * @src_clk: planned new parent struct clk *
+ * @clk: struct clk * that is being reparented
+ * @field_val: pointer to a u32 to contain the register data for the divisor
+ *
+ * Given an intended new parent struct clk * @src_clk, and the struct
+ * clk * @clk to the clock that is being reparented, find the
+ * appropriate rate divisor for the new clock (returned as the return
+ * value), and the corresponding register bitfield data to program to
+ * reach that divisor (returned in the u32 pointed to by @field_val).
+ * Returns 0 on error, or returns the newly-selected divisor upon
+ * success (in this latter case, the corresponding register bitfield
+ * value is passed back in the variable pointed to by @field_val)
  */
-static u32 _omap2_clksel_get_src_field(struct clk *src_clk, struct clk *clk,
-				       u32 *field_val)
+static u8 _get_div_and_fieldval(struct clk *src_clk, struct clk *clk,
+				u32 *field_val)
+{
+	const struct clksel *clks;
+	const struct clksel_rate *clkr, *max_clkr;
+	u8 max_div = 0;
+
+	clks = _get_clksel_by_parent(clk, src_clk);
+	if (!clks)
+		return 0;
+
+	/*
+	 * Find the highest divisor (e.g., the one resulting in the
+	 * lowest rate) to use as the default.  This should avoid
+	 * clock rates that are too high for the device.  XXX A better
+	 * solution here would be to try to determine if there is a
+	 * divisor matching the original clock rate before the parent
+	 * switch, and if it cannot be found, to fall back to the
+	 * highest divisor.
+	 */
+	for (clkr = clks->rates; clkr->div; clkr++) {
+		if (!(clkr->flags & cpu_mask))
+			continue;
+
+		if (clkr->div > max_div) {
+			max_div = clkr->div;
+			max_clkr = clkr;
+		}
+	}
+
+	if (max_div == 0) {
+		/* This indicates an error in the clksel data */
+		WARN(1, "clock: Could not find divisor for clock %s parent %s"
+		     "\n", clk->name, src_clk->parent->name);
+		return 0;
+	}
+
+	*field_val = max_clkr->val;
+
+	return max_div;
+}
+
+/**
+ * _write_clksel_reg() - program a clock's clksel register in hardware
+ * @clk: struct clk * to program
+ * @v: clksel bitfield value to program (with LSB at bit 0)
+ *
+ * Shift the clksel register bitfield value @v to its appropriate
+ * location in the clksel register and write it in.  This function
+ * will ensure that the write to the clksel_reg reaches its
+ * destination before returning -- important since PRM and CM register
+ * accesses can be quite slow compared to ARM cycles -- but does not
+ * take into account any time the hardware might take to switch the
+ * clock source.
+ */
+static void _write_clksel_reg(struct clk *clk, u32 field_val)
+{
+	u32 v;
+
+	v = __raw_readl(clk->clksel_reg);
+	v &= ~clk->clksel_mask;
+	v |= field_val << __ffs(clk->clksel_mask);
+	__raw_writel(v, clk->clksel_reg);
+
+	v = __raw_readl(clk->clksel_reg); /* OCP barrier */
+}
+
+/**
+ * _clksel_to_divisor() - turn clksel field value into integer divider
+ * @clk: OMAP struct clk to use
+ * @field_val: register field value to find
+ *
+ * Given a struct clk of a rate-selectable clksel clock, and a register field
+ * value to search for, find the corresponding clock divisor.  The register
+ * field value should be pre-masked and shifted down so the LSB is at bit 0
+ * before calling.  Returns 0 on error or returns the actual integer divisor
+ * upon success.
+ */
+static u32 _clksel_to_divisor(struct clk *clk, u32 field_val)
 {
 	const struct clksel *clks;
 	const struct clksel_rate *clkr;
 
-	clks = _omap2_get_clksel_by_parent(clk, src_clk);
+	clks = _get_clksel_by_parent(clk, clk->parent);
 	if (!clks)
 		return 0;
 
 	for (clkr = clks->rates; clkr->div; clkr++) {
-		if (clkr->flags & cpu_mask && clkr->flags & DEFAULT_RATE)
-			break; /* Found the default rate for this platform */
+		if (!(clkr->flags & cpu_mask))
+			continue;
+
+		if (clkr->val == field_val)
+			break;
 	}
 
 	if (!clkr->div) {
-		printk(KERN_ERR "clock: Could not find default rate for "
-		       "clock %s parent %s\n", clk->name,
-		       src_clk->parent->name);
+		/* This indicates a data error */
+		WARN(1, "clock: Could not find fieldval %d for clock %s parent "
+		     "%s\n", field_val, clk->name, clk->parent->name);
 		return 0;
 	}
 
-	/* Should never happen.  Add a clksel mask to the struct clk. */
-	WARN_ON(clk->clksel_mask == 0);
-
-	*field_val = clkr->val;
-
 	return clkr->div;
 }
 
+/**
+ * _divisor_to_clksel() - turn clksel integer divisor into a field value
+ * @clk: OMAP struct clk to use
+ * @div: integer divisor to search for
+ *
+ * Given a struct clk of a rate-selectable clksel clock, and a clock
+ * divisor, find the corresponding register field value.  Returns the
+ * register field value _before_ left-shifting (i.e., LSB is at bit
+ * 0); or returns 0xFFFFFFFF (~0) upon error.
+ */
+static u32 _divisor_to_clksel(struct clk *clk, u32 div)
+{
+	const struct clksel *clks;
+	const struct clksel_rate *clkr;
+
+	/* should never happen */
+	WARN_ON(div == 0);
+
+	clks = _get_clksel_by_parent(clk, clk->parent);
+	if (!clks)
+		return ~0;
+
+	for (clkr = clks->rates; clkr->div; clkr++) {
+		if (!(clkr->flags & cpu_mask))
+			continue;
+
+		if (clkr->div == div)
+			break;
+	}
+
+	if (!clkr->div) {
+		pr_err("clock: Could not find divisor %d for clock %s parent "
+		       "%s\n", div, clk->name, clk->parent->name);
+		return ~0;
+	}
+
+	return clkr->val;
+}
+
+/**
+ * _read_divisor() - get current divisor applied to parent clock (from hdwr)
+ * @clk: OMAP struct clk to use.
+ *
+ * Read the current divisor register value for @clk that is programmed
+ * into the hardware, convert it into the actual divisor value, and
+ * return it; or return 0 on error.
+ */
+static u32 _read_divisor(struct clk *clk)
+{
+	u32 v;
+
+	if (!clk->clksel || !clk->clksel_mask)
+		return 0;
+
+	v = __raw_readl(clk->clksel_reg);
+	v &= clk->clksel_mask;
+	v >>= __ffs(clk->clksel_mask);
+
+	return _clksel_to_divisor(clk, v);
+}
 
 /* Public functions */
 
 /**
- * omap2_init_clksel_parent - set a clksel clk's parent field from the hardware
- * @clk: OMAP clock struct ptr to use
- *
- * Given a pointer to a source-selectable struct clk, read the hardware
- * register and determine what its parent is currently set to.  Update the
- * clk->parent field with the appropriate clk ptr.
- */
-void omap2_init_clksel_parent(struct clk *clk)
-{
-	const struct clksel *clks;
-	const struct clksel_rate *clkr;
-	u32 r, found = 0;
-
-	if (!clk->clksel)
-		return;
-
-	r = __raw_readl(clk->clksel_reg) & clk->clksel_mask;
-	r >>= __ffs(clk->clksel_mask);
-
-	for (clks = clk->clksel; clks->parent && !found; clks++) {
-		for (clkr = clks->rates; clkr->div && !found; clkr++) {
-			if ((clkr->flags & cpu_mask) && (clkr->val == r)) {
-				if (clk->parent != clks->parent) {
-					pr_debug("clock: inited %s parent "
-						 "to %s (was %s)\n",
-						 clk->name, clks->parent->name,
-						 ((clk->parent) ?
-						  clk->parent->name : "NULL"));
-					clk_reparent(clk, clks->parent);
-				};
-				found = 1;
-			}
-		}
-	}
-
-	if (!found)
-		printk(KERN_ERR "clock: init parent: could not find "
-		       "regval %0x for clock %s\n", r,  clk->name);
-
-	return;
-}
-
-/*
- * Used for clocks that are part of CLKSEL_xyz governed clocks.
- * REVISIT: Maybe change to use clk->enable() functions like on omap1?
- */
-unsigned long omap2_clksel_recalc(struct clk *clk)
-{
-	unsigned long rate;
-	u32 div = 0;
-
-	pr_debug("clock: recalc'ing clksel clk %s\n", clk->name);
-
-	div = omap2_clksel_get_divisor(clk);
-	if (div == 0)
-		return clk->rate;
-
-	rate = clk->parent->rate / div;
-
-	pr_debug("clock: new clock rate is %ld (div %d)\n", rate, div);
-
-	return rate;
-}
-
-/**
- * omap2_clksel_round_rate_div - find divisor for the given clock and rate
+ * omap2_clksel_round_rate_div() - find divisor for the given clock and rate
  * @clk: OMAP struct clk to use
  * @target_rate: desired clock rate
  * @new_div: ptr to where we should store the divisor
  *
  * Finds 'best' divider value in an array based on the source and target
  * rates.  The divider array must be sorted with smallest divider first.
- * Note that this will not work for clocks which are part of CONFIG_PARTICIPANT,
- * they are only settable as part of virtual_prcm set.
+ * This function is also used by the DPLL3 M2 divider code.
  *
  * Returns the rounded clock rate or returns 0xffffffff on error.
  */
@@ -190,12 +281,15 @@
 	const struct clksel_rate *clkr;
 	u32 last_div = 0;
 
+	if (!clk->clksel || !clk->clksel_mask)
+		return ~0;
+
 	pr_debug("clock: clksel_round_rate_div: %s target_rate %ld\n",
 		 clk->name, target_rate);
 
 	*new_div = 1;
 
-	clks = _omap2_get_clksel_by_parent(clk, clk->parent);
+	clks = _get_clksel_by_parent(clk, clk->parent);
 	if (!clks)
 		return ~0;
 
@@ -231,16 +325,92 @@
 	return clk->parent->rate / clkr->div;
 }
 
+/*
+ * Clocktype interface functions to the OMAP clock code
+ * (i.e., those used in struct clk field function pointers, etc.)
+ */
+
 /**
- * omap2_clksel_round_rate - find rounded rate for the given clock and rate
+ * omap2_init_clksel_parent() - set a clksel clk's parent field from the hdwr
+ * @clk: OMAP clock struct ptr to use
+ *
+ * Given a pointer @clk to a source-selectable struct clk, read the
+ * hardware register and determine what its parent is currently set
+ * to.  Update @clk's .parent field with the appropriate clk ptr.  No
+ * return value.
+ */
+void omap2_init_clksel_parent(struct clk *clk)
+{
+	const struct clksel *clks;
+	const struct clksel_rate *clkr;
+	u32 r, found = 0;
+
+	if (!clk->clksel || !clk->clksel_mask)
+		return;
+
+	r = __raw_readl(clk->clksel_reg) & clk->clksel_mask;
+	r >>= __ffs(clk->clksel_mask);
+
+	for (clks = clk->clksel; clks->parent && !found; clks++) {
+		for (clkr = clks->rates; clkr->div && !found; clkr++) {
+			if (!(clkr->flags & cpu_mask))
+				continue;
+
+			if (clkr->val == r) {
+				if (clk->parent != clks->parent) {
+					pr_debug("clock: inited %s parent "
+						 "to %s (was %s)\n",
+						 clk->name, clks->parent->name,
+						 ((clk->parent) ?
+						  clk->parent->name : "NULL"));
+					clk_reparent(clk, clks->parent);
+				};
+				found = 1;
+			}
+		}
+	}
+
+	/* This indicates a data error */
+	WARN(!found, "clock: %s: init parent: could not find regval %0x\n",
+	     clk->name, r);
+
+	return;
+}
+
+/**
+ * omap2_clksel_recalc() - function ptr to pass via struct clk .recalc field
+ * @clk: struct clk *
+ *
+ * This function is intended to be called only by the clock framework.
+ * Each clksel clock should have its struct clk .recalc field set to this
+ * function.  Returns the clock's current rate, based on its parent's rate
+ * and its current divisor setting in the hardware.
+ */
+unsigned long omap2_clksel_recalc(struct clk *clk)
+{
+	unsigned long rate;
+	u32 div = 0;
+
+	div = _read_divisor(clk);
+	if (div == 0)
+		return clk->rate;
+
+	rate = clk->parent->rate / div;
+
+	pr_debug("clock: %s: recalc'd rate is %ld (div %d)\n", clk->name,
+		 rate, div);
+
+	return rate;
+}
+
+/**
+ * omap2_clksel_round_rate() - find rounded rate for the given clock and rate
  * @clk: OMAP struct clk to use
  * @target_rate: desired clock rate
  *
- * Compatibility wrapper for OMAP clock framework
+ * This function is intended to be called only by the clock framework.
  * Finds best target rate based on the source clock and possible dividers.
  * rates. The divider array must be sorted with smallest divider first.
- * Note that this will not work for clocks which are part of CONFIG_PARTICIPANT,
- * they are only settable as part of virtual_prcm set.
  *
  * Returns the rounded clock rate or returns 0xffffffff on error.
  */
@@ -251,148 +421,78 @@
 	return omap2_clksel_round_rate_div(clk, target_rate, &new_div);
 }
 
-
-/* Given a clock and a rate apply a clock specific rounding function */
-long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
-{
-	if (clk->round_rate)
-		return clk->round_rate(clk, rate);
-
-	return clk->rate;
-}
-
 /**
- * omap2_clksel_to_divisor() - turn clksel field value into integer divider
- * @clk: OMAP struct clk to use
- * @field_val: register field value to find
+ * omap2_clksel_set_rate() - program clock rate in hardware
+ * @clk: struct clk * to program rate
+ * @rate: target rate to program
  *
- * Given a struct clk of a rate-selectable clksel clock, and a register field
- * value to search for, find the corresponding clock divisor.  The register
- * field value should be pre-masked and shifted down so the LSB is at bit 0
- * before calling.  Returns 0 on error
+ * This function is intended to be called only by the clock framework.
+ * Program @clk's rate to @rate in the hardware.  The clock can be
+ * either enabled or disabled when this happens, although if the clock
+ * is enabled, some downstream devices may glitch or behave
+ * unpredictably when the clock rate is changed - this depends on the
+ * hardware. This function does not currently check the usecount of
+ * the clock, so if multiple drivers are using the clock, and the rate
+ * is changed, they will all be affected without any notification.
+ * Returns -EINVAL upon error, or 0 upon success.
  */
-u32 omap2_clksel_to_divisor(struct clk *clk, u32 field_val)
-{
-	const struct clksel *clks;
-	const struct clksel_rate *clkr;
-
-	clks = _omap2_get_clksel_by_parent(clk, clk->parent);
-	if (!clks)
-		return 0;
-
-	for (clkr = clks->rates; clkr->div; clkr++) {
-		if ((clkr->flags & cpu_mask) && (clkr->val == field_val))
-			break;
-	}
-
-	if (!clkr->div) {
-		printk(KERN_ERR "clock: Could not find fieldval %d for "
-		       "clock %s parent %s\n", field_val, clk->name,
-		       clk->parent->name);
-		return 0;
-	}
-
-	return clkr->div;
-}
-
-/**
- * omap2_divisor_to_clksel() - turn clksel integer divisor into a field value
- * @clk: OMAP struct clk to use
- * @div: integer divisor to search for
- *
- * Given a struct clk of a rate-selectable clksel clock, and a clock divisor,
- * find the corresponding register field value.  The return register value is
- * the value before left-shifting.  Returns ~0 on error
- */
-u32 omap2_divisor_to_clksel(struct clk *clk, u32 div)
-{
-	const struct clksel *clks;
-	const struct clksel_rate *clkr;
-
-	/* should never happen */
-	WARN_ON(div == 0);
-
-	clks = _omap2_get_clksel_by_parent(clk, clk->parent);
-	if (!clks)
-		return ~0;
-
-	for (clkr = clks->rates; clkr->div; clkr++) {
-		if ((clkr->flags & cpu_mask) && (clkr->div == div))
-			break;
-	}
-
-	if (!clkr->div) {
-		printk(KERN_ERR "clock: Could not find divisor %d for "
-		       "clock %s parent %s\n", div, clk->name,
-		       clk->parent->name);
-		return ~0;
-	}
-
-	return clkr->val;
-}
-
-/**
- * omap2_clksel_get_divisor - get current divider applied to parent clock.
- * @clk: OMAP struct clk to use.
- *
- * Returns the integer divisor upon success or 0 on error.
- */
-u32 omap2_clksel_get_divisor(struct clk *clk)
-{
-	u32 v;
-
-	if (!clk->clksel_mask)
-		return 0;
-
-	v = __raw_readl(clk->clksel_reg) & clk->clksel_mask;
-	v >>= __ffs(clk->clksel_mask);
-
-	return omap2_clksel_to_divisor(clk, v);
-}
-
 int omap2_clksel_set_rate(struct clk *clk, unsigned long rate)
 {
-	u32 v, field_val, validrate, new_div = 0;
+	u32 field_val, validrate, new_div = 0;
 
-	if (!clk->clksel_mask)
+	if (!clk->clksel || !clk->clksel_mask)
 		return -EINVAL;
 
 	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
 	if (validrate != rate)
 		return -EINVAL;
 
-	field_val = omap2_divisor_to_clksel(clk, new_div);
+	field_val = _divisor_to_clksel(clk, new_div);
 	if (field_val == ~0)
 		return -EINVAL;
 
-	v = __raw_readl(clk->clksel_reg);
-	v &= ~clk->clksel_mask;
-	v |= field_val << __ffs(clk->clksel_mask);
-	__raw_writel(v, clk->clksel_reg);
-	v = __raw_readl(clk->clksel_reg); /* OCP barrier */
+	_write_clksel_reg(clk, field_val);
 
 	clk->rate = clk->parent->rate / new_div;
 
+	pr_debug("clock: %s: set rate to %ld\n", clk->name, clk->rate);
+
 	return 0;
 }
 
+/*
+ * Clksel parent setting function - not passed in struct clk function
+ * pointer - instead, the OMAP clock code currently assumes that any
+ * parent-setting clock is a clksel clock, and calls
+ * omap2_clksel_set_parent() by default
+ */
+
+/**
+ * omap2_clksel_set_parent() - change a clock's parent clock
+ * @clk: struct clk * of the child clock
+ * @new_parent: struct clk * of the new parent clock
+ *
+ * This function is intended to be called only by the clock framework.
+ * Change the parent clock of clock @clk to @new_parent.  This is
+ * intended to be used while @clk is disabled.  This function does not
+ * currently check the usecount of the clock, so if multiple drivers
+ * are using the clock, and the parent is changed, they will all be
+ * affected without any notification.  Returns -EINVAL upon error, or
+ * 0 upon success.
+ */
 int omap2_clksel_set_parent(struct clk *clk, struct clk *new_parent)
 {
-	u32 field_val, v, parent_div;
+	u32 field_val = 0;
+	u32 parent_div;
 
-	if (!clk->clksel)
+	if (!clk->clksel || !clk->clksel_mask)
 		return -EINVAL;
 
-	parent_div = _omap2_clksel_get_src_field(new_parent, clk, &field_val);
+	parent_div = _get_div_and_fieldval(new_parent, clk, &field_val);
 	if (!parent_div)
 		return -EINVAL;
 
-	/* Set new source value (previous dividers if any in effect) */
-	v = __raw_readl(clk->clksel_reg);
-	v &= ~clk->clksel_mask;
-	v |= field_val << __ffs(clk->clksel_mask);
-	__raw_writel(v, clk->clksel_reg);
-	v = __raw_readl(clk->clksel_reg);    /* OCP barrier */
+	_write_clksel_reg(clk, field_val);
 
 	clk_reparent(clk, new_parent);
 
@@ -402,7 +502,7 @@
 	if (parent_div > 0)
 		clk->rate /= parent_div;
 
-	pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
+	pr_debug("clock: %s: set parent to %s (new rate %ld)\n",
 		 clk->name, clk->parent->name, clk->rate);
 
 	return 0;
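
For readers following the clkt_clksel.c rewrite above: the register handling in _write_clksel_reg() and _read_divisor() reduces to masking a bitfield and shifting by the position of the mask's least significant set bit, after which _clksel_to_divisor()/_divisor_to_clksel() translate between the raw field value and the integer divisor via the struct clksel_rate tables. A minimal userspace sketch of just the mask/shift arithmetic is shown below; the register value, mask, and helper names are invented for illustration (lsb_index() stands in for the kernel's __ffs()).

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's __ffs(): bit index of the least significant set bit. */
static unsigned int lsb_index(uint32_t mask)
{
	return (unsigned int)__builtin_ctz(mask);
}

/* Program a clksel field value into a shadow "register", as _write_clksel_reg() does. */
static uint32_t write_field(uint32_t reg, uint32_t mask, uint32_t field_val)
{
	reg &= ~mask;
	reg |= field_val << lsb_index(mask);
	return reg;
}

/* Read the raw field value back out, as _read_divisor() does before the divisor lookup. */
static uint32_t read_field(uint32_t reg, uint32_t mask)
{
	return (reg & mask) >> lsb_index(mask);
}

int main(void)
{
	uint32_t reg  = 0xffffffff;	/* hypothetical CM_CLKSEL register contents */
	uint32_t mask = 0x00000f00;	/* hypothetical 4-bit clksel field at bits 8..11 */

	reg = write_field(reg, mask, 4);	/* e.g. program the field value for divide-by-4 */
	printf("reg = 0x%08x, field = %u\n", (unsigned)reg, (unsigned)read_field(reg, mask));
	return 0;
}
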
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index a6d0b34..605f531 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -334,6 +334,15 @@
 	return ret;
 }
 
+/* Given a clock and a rate apply a clock specific rounding function */
+long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	if (clk->round_rate)
+		return clk->round_rate(clk, rate);
+
+	return clk->rate;
+}
+
 /* Set the clock rate for a clock source */
 int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
 {
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index ad8a1f7..a535c7a 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -73,19 +73,20 @@
 #define omap2_clk_disable_unused	NULL
 #endif
 
-unsigned long omap2_clksel_recalc(struct clk *clk);
 void omap2_init_clk_clkdm(struct clk *clk);
-void omap2_init_clksel_parent(struct clk *clk);
-u32 omap2_clksel_get_divisor(struct clk *clk);
+
+/* clkt_clksel.c public functions */
 u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate,
 				u32 *new_div);
-u32 omap2_clksel_to_divisor(struct clk *clk, u32 field_val);
-u32 omap2_divisor_to_clksel(struct clk *clk, u32 div);
+void omap2_init_clksel_parent(struct clk *clk);
+unsigned long omap2_clksel_recalc(struct clk *clk);
 long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate);
 int omap2_clksel_set_rate(struct clk *clk, unsigned long rate);
 int omap2_clksel_set_parent(struct clk *clk, struct clk *new_parent);
+
 u32 omap2_get_dpll_rate(struct clk *clk);
 void omap2_init_dpll_parent(struct clk *clk);
+
 int omap2_wait_clock_ready(void __iomem *reg, u32 cval, const char *name);
 
 
diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c
index d932b14..37d65d6 100644
--- a/arch/arm/mach-omap2/clock2420_data.c
+++ b/arch/arm/mach-omap2/clock2420_data.c
@@ -155,12 +155,12 @@
 /* func_54m_ck */
 
 static const struct clksel_rate func_54m_apll54_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
 	{ .div = 0 },
 };
 
 static const struct clksel_rate func_54m_alt_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 0 },
 };
 
@@ -177,7 +177,7 @@
 	.clkdm_name	= "wkup_clkdm",
 	.init		= &omap2_init_clksel_parent,
 	.clksel_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-	.clksel_mask	= OMAP24XX_54M_SOURCE,
+	.clksel_mask	= OMAP24XX_54M_SOURCE_MASK,
 	.clksel		= func_54m_clksel,
 	.recalc		= &omap2_clksel_recalc,
 };
@@ -201,12 +201,12 @@
 /* func_48m_ck */
 
 static const struct clksel_rate func_48m_apll96_rates[] = {
-	{ .div = 2, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 2, .val = 0, .flags = RATE_IN_24XX },
 	{ .div = 0 },
 };
 
 static const struct clksel_rate func_48m_alt_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 0 },
 };
 
@@ -223,7 +223,7 @@
 	.clkdm_name	= "wkup_clkdm",
 	.init		= &omap2_init_clksel_parent,
 	.clksel_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-	.clksel_mask	= OMAP24XX_48M_SOURCE,
+	.clksel_mask	= OMAP24XX_48M_SOURCE_MASK,
 	.clksel		= func_48m_clksel,
 	.recalc		= &omap2_clksel_recalc,
 	.round_rate	= &omap2_clksel_round_rate,
@@ -256,22 +256,22 @@
  * flags fields, which mark them as 2420-only.
  */
 static const struct clksel_rate common_clkout_src_core_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate common_clkout_src_sys_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate common_clkout_src_96m_rates[] = {
-	{ .div = 1, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate common_clkout_src_54m_rates[] = {
-	{ .div = 1, .val = 3, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 3, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
@@ -300,7 +300,7 @@
 };
 
 static const struct clksel_rate common_clkout_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
 	{ .div = 2, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 4, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 8, .val = 3, .flags = RATE_IN_24XX },
@@ -384,7 +384,7 @@
  *
  */
 static const struct clksel_rate mpu_core_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 4, .val = 4, .flags = RATE_IN_242X },
 	{ .div = 6, .val = 6, .flags = RATE_IN_242X },
@@ -420,7 +420,7 @@
  * routed into a synchronizer and out of clocks abc.
  */
 static const struct clksel_rate dsp_fck_core_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 3, .val = 3, .flags = RATE_IN_24XX },
 	{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
@@ -450,7 +450,7 @@
 
 /* DSP interface clock */
 static const struct clksel_rate dsp_irate_ick_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 0 },
 };
@@ -532,7 +532,7 @@
 static const struct clksel_rate core_l3_core_rates[] = {
 	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 2, .val = 2, .flags = RATE_IN_242X },
-	{ .div = 4, .val = 4, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
 	{ .div = 6, .val = 6, .flags = RATE_IN_24XX },
 	{ .div = 8, .val = 8, .flags = RATE_IN_242X },
 	{ .div = 12, .val = 12, .flags = RATE_IN_242X },
@@ -559,7 +559,7 @@
 /* usb_l4_ick */
 static const struct clksel_rate usb_l4_ick_core_l3_rates[] = {
 	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
-	{ .div = 2, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
@@ -591,7 +591,7 @@
  * this domain.
  */
 static const struct clksel_rate l4_core_l3_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
@@ -622,7 +622,7 @@
  */
 static const struct clksel_rate ssi_ssr_sst_fck_core_rates[] = {
 	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
-	{ .div = 2, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 3, .val = 3, .flags = RATE_IN_24XX },
 	{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
 	{ .div = 6, .val = 6, .flags = RATE_IN_242X },
@@ -730,7 +730,7 @@
 /* XXX Add RATE_NOT_VALIDATED */
 
 static const struct clksel_rate dss1_fck_sys_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
@@ -744,7 +744,7 @@
 	{ .div = 8, .val = 8, .flags = RATE_IN_24XX },
 	{ .div = 9, .val = 9, .flags = RATE_IN_24XX },
 	{ .div = 12, .val = 12, .flags = RATE_IN_24XX },
-	{ .div = 16, .val = 16, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 16, .val = 16, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
@@ -779,12 +779,12 @@
 };
 
 static const struct clksel_rate dss2_fck_sys_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate dss2_fck_48m_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
@@ -825,7 +825,7 @@
  * functional clock parents.
  */
 static const struct clksel_rate gpt_alt_rates[] = {
-	{ .div = 1, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
@@ -1588,7 +1588,7 @@
 };
 
 static const struct clksel_rate vlynq_fck_96m_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_242X | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_242X },
 	{ .div = 0 }
 };
 
@@ -1601,7 +1601,7 @@
 	{ .div = 8, .val = 8, .flags = RATE_IN_242X },
 	{ .div = 9, .val = 9, .flags = RATE_IN_242X },
 	{ .div = 12, .val = 12, .flags = RATE_IN_242X },
-	{ .div = 16, .val = 16, .flags = RATE_IN_242X | DEFAULT_RATE },
+	{ .div = 16, .val = 16, .flags = RATE_IN_242X },
 	{ .div = 18, .val = 18, .flags = RATE_IN_242X },
 	{ .div = 0 }
 };
@@ -1836,7 +1836,7 @@
 	CLK(NULL,	"vlynq_ick",	&vlynq_ick,	CK_242X),
 	CLK(NULL,	"vlynq_fck",	&vlynq_fck,	CK_242X),
 	CLK(NULL,	"des_ick",	&des_ick,	CK_242X),
-	CLK(NULL,	"sha_ick",	&sha_ick,	CK_242X),
+	CLK("omap-sham",	"ick",	&sha_ick,	CK_242X),
 	CLK("omap_rng",	"ick",		&rng_ick,	CK_242X),
 	CLK(NULL,	"aes_ick",	&aes_ick,	CK_242X),
 	CLK(NULL,	"pka_ick",	&pka_ick,	CK_242X),
diff --git a/arch/arm/mach-omap2/clock2430_data.c b/arch/arm/mach-omap2/clock2430_data.c
index 0438b6e..b33118f 100644
--- a/arch/arm/mach-omap2/clock2430_data.c
+++ b/arch/arm/mach-omap2/clock2430_data.c
@@ -155,12 +155,12 @@
 /* func_54m_ck */
 
 static const struct clksel_rate func_54m_apll54_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
 	{ .div = 0 },
 };
 
 static const struct clksel_rate func_54m_alt_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 0 },
 };
 
@@ -177,7 +177,7 @@
 	.clkdm_name	= "wkup_clkdm",
 	.init		= &omap2_init_clksel_parent,
 	.clksel_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-	.clksel_mask	= OMAP24XX_54M_SOURCE,
+	.clksel_mask	= OMAP24XX_54M_SOURCE_MASK,
 	.clksel		= func_54m_clksel,
 	.recalc		= &omap2_clksel_recalc,
 };
@@ -192,12 +192,12 @@
 
 /* func_96m_ck */
 static const struct clksel_rate func_96m_apll96_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
 	{ .div = 0 },
 };
 
 static const struct clksel_rate func_96m_alt_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_243X | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_243X },
 	{ .div = 0 },
 };
 
@@ -214,7 +214,7 @@
 	.clkdm_name	= "wkup_clkdm",
 	.init		= &omap2_init_clksel_parent,
 	.clksel_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-	.clksel_mask	= OMAP2430_96M_SOURCE,
+	.clksel_mask	= OMAP2430_96M_SOURCE_MASK,
 	.clksel		= func_96m_clksel,
 	.recalc		= &omap2_clksel_recalc,
 };
@@ -222,12 +222,12 @@
 /* func_48m_ck */
 
 static const struct clksel_rate func_48m_apll96_rates[] = {
-	{ .div = 2, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 2, .val = 0, .flags = RATE_IN_24XX },
 	{ .div = 0 },
 };
 
 static const struct clksel_rate func_48m_alt_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 0 },
 };
 
@@ -244,7 +244,7 @@
 	.clkdm_name	= "wkup_clkdm",
 	.init		= &omap2_init_clksel_parent,
 	.clksel_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-	.clksel_mask	= OMAP24XX_48M_SOURCE,
+	.clksel_mask	= OMAP24XX_48M_SOURCE_MASK,
 	.clksel		= func_48m_clksel,
 	.recalc		= &omap2_clksel_recalc,
 	.round_rate	= &omap2_clksel_round_rate,
@@ -277,22 +277,22 @@
  * flags fields, which mark them as 2420-only.
  */
 static const struct clksel_rate common_clkout_src_core_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate common_clkout_src_sys_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate common_clkout_src_96m_rates[] = {
-	{ .div = 1, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate common_clkout_src_54m_rates[] = {
-	{ .div = 1, .val = 3, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 3, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
@@ -321,7 +321,7 @@
 };
 
 static const struct clksel_rate common_clkout_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
 	{ .div = 2, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 4, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 8, .val = 3, .flags = RATE_IN_24XX },
@@ -369,7 +369,7 @@
  *
  */
 static const struct clksel_rate mpu_core_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 0 },
 };
@@ -402,7 +402,7 @@
  * routed into a synchronizer and out of clocks abc.
  */
 static const struct clksel_rate dsp_fck_core_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 3, .val = 3, .flags = RATE_IN_24XX },
 	{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
@@ -429,7 +429,7 @@
 
 /* DSP interface clock */
 static const struct clksel_rate dsp_irate_ick_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 3, .val = 3, .flags = RATE_IN_243X },
 	{ .div = 0 },
@@ -481,7 +481,7 @@
  */
 static const struct clksel_rate core_l3_core_rates[] = {
 	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
-	{ .div = 4, .val = 4, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
 	{ .div = 6, .val = 6, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
@@ -505,7 +505,7 @@
 /* usb_l4_ick */
 static const struct clksel_rate usb_l4_ick_core_l3_rates[] = {
 	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
-	{ .div = 2, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
@@ -537,7 +537,7 @@
  * this domain.
  */
 static const struct clksel_rate l4_core_l3_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
@@ -568,7 +568,7 @@
  */
 static const struct clksel_rate ssi_ssr_sst_fck_core_rates[] = {
 	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
-	{ .div = 2, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 3, .val = 3, .flags = RATE_IN_24XX },
 	{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
 	{ .div = 5, .val = 5, .flags = RATE_IN_243X },
@@ -673,7 +673,7 @@
  */
 static const struct clksel_rate mdm_ick_core_rates[] = {
 	{ .div = 1, .val = 1, .flags = RATE_IN_243X },
-	{ .div = 4, .val = 4, .flags = RATE_IN_243X | DEFAULT_RATE },
+	{ .div = 4, .val = 4, .flags = RATE_IN_243X },
 	{ .div = 6, .val = 6, .flags = RATE_IN_243X },
 	{ .div = 9, .val = 9, .flags = RATE_IN_243X },
 	{ .div = 0 }
@@ -718,7 +718,7 @@
 /* XXX Add RATE_NOT_VALIDATED */
 
 static const struct clksel_rate dss1_fck_sys_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
@@ -732,7 +732,7 @@
 	{ .div = 8, .val = 8, .flags = RATE_IN_24XX },
 	{ .div = 9, .val = 9, .flags = RATE_IN_24XX },
 	{ .div = 12, .val = 12, .flags = RATE_IN_24XX },
-	{ .div = 16, .val = 16, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 16, .val = 16, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
@@ -767,12 +767,12 @@
 };
 
 static const struct clksel_rate dss2_fck_sys_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate dss2_fck_48m_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
@@ -813,7 +813,7 @@
  * functional clock parents.
  */
 static const struct clksel_rate gpt_alt_rates[] = {
-	{ .div = 1, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+	{ .div = 1, .val = 2, .flags = RATE_IN_24XX },
 	{ .div = 0 }
 };
 
@@ -1924,7 +1924,7 @@
 	CLK(NULL,	"sdma_ick",	&sdma_ick,	CK_243X),
 	CLK(NULL,	"sdrc_ick",	&sdrc_ick,	CK_243X),
 	CLK(NULL,	"des_ick",	&des_ick,	CK_243X),
-	CLK(NULL,	"sha_ick",	&sha_ick,	CK_243X),
+	CLK("omap-sham",	"ick",	&sha_ick,	CK_243X),
 	CLK("omap_rng",	"ick",		&rng_ick,	CK_243X),
 	CLK(NULL,	"aes_ick",	&aes_ick,	CK_243X),
 	CLK(NULL,	"pka_ick",	&pka_ick,	CK_243X),
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 9cba556..41b155a 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -110,32 +110,32 @@
 };
 
 static const struct clksel_rate osc_sys_12m_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate osc_sys_13m_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate osc_sys_16_8m_rates[] = {
-	{ .div = 1, .val = 5, .flags = RATE_IN_3430ES2 | DEFAULT_RATE },
+	{ .div = 1, .val = 5, .flags = RATE_IN_3430ES2PLUS },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate osc_sys_19_2m_rates[] = {
-	{ .div = 1, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 2, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate osc_sys_26m_rates[] = {
-	{ .div = 1, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 3, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate osc_sys_38_4m_rates[] = {
-	{ .div = 1, .val = 4, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 4, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
@@ -163,8 +163,8 @@
 };
 
 static const struct clksel_rate div2_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
-	{ .div = 2, .val = 2, .flags = RATE_IN_343X },
+	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
@@ -213,42 +213,42 @@
 /* CM CLOCKS */
 
 static const struct clksel_rate div16_dpll_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
-	{ .div = 2, .val = 2, .flags = RATE_IN_343X },
-	{ .div = 3, .val = 3, .flags = RATE_IN_343X },
-	{ .div = 4, .val = 4, .flags = RATE_IN_343X },
-	{ .div = 5, .val = 5, .flags = RATE_IN_343X },
-	{ .div = 6, .val = 6, .flags = RATE_IN_343X },
-	{ .div = 7, .val = 7, .flags = RATE_IN_343X },
-	{ .div = 8, .val = 8, .flags = RATE_IN_343X },
-	{ .div = 9, .val = 9, .flags = RATE_IN_343X },
-	{ .div = 10, .val = 10, .flags = RATE_IN_343X },
-	{ .div = 11, .val = 11, .flags = RATE_IN_343X },
-	{ .div = 12, .val = 12, .flags = RATE_IN_343X },
-	{ .div = 13, .val = 13, .flags = RATE_IN_343X },
-	{ .div = 14, .val = 14, .flags = RATE_IN_343X },
-	{ .div = 15, .val = 15, .flags = RATE_IN_343X },
-	{ .div = 16, .val = 16, .flags = RATE_IN_343X },
+	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+	{ .div = 3, .val = 3, .flags = RATE_IN_3XXX },
+	{ .div = 4, .val = 4, .flags = RATE_IN_3XXX },
+	{ .div = 5, .val = 5, .flags = RATE_IN_3XXX },
+	{ .div = 6, .val = 6, .flags = RATE_IN_3XXX },
+	{ .div = 7, .val = 7, .flags = RATE_IN_3XXX },
+	{ .div = 8, .val = 8, .flags = RATE_IN_3XXX },
+	{ .div = 9, .val = 9, .flags = RATE_IN_3XXX },
+	{ .div = 10, .val = 10, .flags = RATE_IN_3XXX },
+	{ .div = 11, .val = 11, .flags = RATE_IN_3XXX },
+	{ .div = 12, .val = 12, .flags = RATE_IN_3XXX },
+	{ .div = 13, .val = 13, .flags = RATE_IN_3XXX },
+	{ .div = 14, .val = 14, .flags = RATE_IN_3XXX },
+	{ .div = 15, .val = 15, .flags = RATE_IN_3XXX },
+	{ .div = 16, .val = 16, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
-static const struct clksel_rate div32_dpll4_rates_3630[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_36XX | DEFAULT_RATE },
-	{ .div = 2, .val = 2, .flags = RATE_IN_36XX },
-	{ .div = 3, .val = 3, .flags = RATE_IN_36XX },
-	{ .div = 4, .val = 4, .flags = RATE_IN_36XX },
-	{ .div = 5, .val = 5, .flags = RATE_IN_36XX },
-	{ .div = 6, .val = 6, .flags = RATE_IN_36XX },
-	{ .div = 7, .val = 7, .flags = RATE_IN_36XX },
-	{ .div = 8, .val = 8, .flags = RATE_IN_36XX },
-	{ .div = 9, .val = 9, .flags = RATE_IN_36XX },
-	{ .div = 10, .val = 10, .flags = RATE_IN_36XX },
-	{ .div = 11, .val = 11, .flags = RATE_IN_36XX },
-	{ .div = 12, .val = 12, .flags = RATE_IN_36XX },
-	{ .div = 13, .val = 13, .flags = RATE_IN_36XX },
-	{ .div = 14, .val = 14, .flags = RATE_IN_36XX },
-	{ .div = 15, .val = 15, .flags = RATE_IN_36XX },
-	{ .div = 16, .val = 16, .flags = RATE_IN_36XX },
+static const struct clksel_rate dpll4_rates[] = {
+	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+	{ .div = 3, .val = 3, .flags = RATE_IN_3XXX },
+	{ .div = 4, .val = 4, .flags = RATE_IN_3XXX },
+	{ .div = 5, .val = 5, .flags = RATE_IN_3XXX },
+	{ .div = 6, .val = 6, .flags = RATE_IN_3XXX },
+	{ .div = 7, .val = 7, .flags = RATE_IN_3XXX },
+	{ .div = 8, .val = 8, .flags = RATE_IN_3XXX },
+	{ .div = 9, .val = 9, .flags = RATE_IN_3XXX },
+	{ .div = 10, .val = 10, .flags = RATE_IN_3XXX },
+	{ .div = 11, .val = 11, .flags = RATE_IN_3XXX },
+	{ .div = 12, .val = 12, .flags = RATE_IN_3XXX },
+	{ .div = 13, .val = 13, .flags = RATE_IN_3XXX },
+	{ .div = 14, .val = 14, .flags = RATE_IN_3XXX },
+	{ .div = 15, .val = 15, .flags = RATE_IN_3XXX },
+	{ .div = 16, .val = 16, .flags = RATE_IN_3XXX },
 	{ .div = 17, .val = 17, .flags = RATE_IN_36XX },
 	{ .div = 18, .val = 18, .flags = RATE_IN_36XX },
 	{ .div = 19, .val = 19, .flags = RATE_IN_36XX },
@@ -450,37 +450,37 @@
 };
 
 static const struct clksel_rate div31_dpll3_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
-	{ .div = 2, .val = 2, .flags = RATE_IN_343X },
-	{ .div = 3, .val = 3, .flags = RATE_IN_3430ES2 },
-	{ .div = 4, .val = 4, .flags = RATE_IN_3430ES2 },
-	{ .div = 5, .val = 5, .flags = RATE_IN_3430ES2 },
-	{ .div = 6, .val = 6, .flags = RATE_IN_3430ES2 },
-	{ .div = 7, .val = 7, .flags = RATE_IN_3430ES2 },
-	{ .div = 8, .val = 8, .flags = RATE_IN_3430ES2 },
-	{ .div = 9, .val = 9, .flags = RATE_IN_3430ES2 },
-	{ .div = 10, .val = 10, .flags = RATE_IN_3430ES2 },
-	{ .div = 11, .val = 11, .flags = RATE_IN_3430ES2 },
-	{ .div = 12, .val = 12, .flags = RATE_IN_3430ES2 },
-	{ .div = 13, .val = 13, .flags = RATE_IN_3430ES2 },
-	{ .div = 14, .val = 14, .flags = RATE_IN_3430ES2 },
-	{ .div = 15, .val = 15, .flags = RATE_IN_3430ES2 },
-	{ .div = 16, .val = 16, .flags = RATE_IN_3430ES2 },
-	{ .div = 17, .val = 17, .flags = RATE_IN_3430ES2 },
-	{ .div = 18, .val = 18, .flags = RATE_IN_3430ES2 },
-	{ .div = 19, .val = 19, .flags = RATE_IN_3430ES2 },
-	{ .div = 20, .val = 20, .flags = RATE_IN_3430ES2 },
-	{ .div = 21, .val = 21, .flags = RATE_IN_3430ES2 },
-	{ .div = 22, .val = 22, .flags = RATE_IN_3430ES2 },
-	{ .div = 23, .val = 23, .flags = RATE_IN_3430ES2 },
-	{ .div = 24, .val = 24, .flags = RATE_IN_3430ES2 },
-	{ .div = 25, .val = 25, .flags = RATE_IN_3430ES2 },
-	{ .div = 26, .val = 26, .flags = RATE_IN_3430ES2 },
-	{ .div = 27, .val = 27, .flags = RATE_IN_3430ES2 },
-	{ .div = 28, .val = 28, .flags = RATE_IN_3430ES2 },
-	{ .div = 29, .val = 29, .flags = RATE_IN_3430ES2 },
-	{ .div = 30, .val = 30, .flags = RATE_IN_3430ES2 },
-	{ .div = 31, .val = 31, .flags = RATE_IN_3430ES2 },
+	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+	{ .div = 3, .val = 3, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 4, .val = 4, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 5, .val = 5, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 6, .val = 6, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 7, .val = 7, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 8, .val = 8, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 9, .val = 9, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 10, .val = 10, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 11, .val = 11, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 12, .val = 12, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 13, .val = 13, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 14, .val = 14, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 15, .val = 15, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 16, .val = 16, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 17, .val = 17, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 18, .val = 18, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 19, .val = 19, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 20, .val = 20, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 21, .val = 21, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 22, .val = 22, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 23, .val = 23, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 24, .val = 24, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 25, .val = 25, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 26, .val = 26, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 27, .val = 27, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 28, .val = 28, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 29, .val = 29, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 30, .val = 30, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 31, .val = 31, .flags = RATE_IN_3430ES2PLUS },
 	{ .div = 0 },
 };
 
@@ -562,6 +562,7 @@
 /* Supplies 96MHz, 54Mhz TV DAC, DSS fclk, CAM sensor clock, emul trace clk */
 /* Type: DPLL */
 static struct dpll_data dpll4_dd;
+
 static struct dpll_data dpll4_dd_34xx __initdata = {
 	.mult_div1_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
 	.mult_mask	= OMAP3430_PERIPH_DPLL_MULT_MASK,
@@ -632,39 +633,20 @@
 	.recalc		= &omap3_clkoutx2_recalc,
 };
 
-static const struct clksel div16_dpll4_clksel[] = {
-	{ .parent = &dpll4_ck, .rates = div16_dpll_rates },
-	{ .parent = NULL }
-};
-
-static const struct clksel div32_dpll4_clksel[] = {
-	{ .parent = &dpll4_ck, .rates = div32_dpll4_rates_3630 },
+static const struct clksel dpll4_clksel[] = {
+	{ .parent = &dpll4_ck, .rates = dpll4_rates },
 	{ .parent = NULL }
 };
 
 /* This virtual clock is the source for dpll4_m2x2_ck */
-static struct clk dpll4_m2_ck;
-
-static struct clk dpll4_m2_ck_34xx __initdata = {
-	.name		= "dpll4_m2_ck",
-	.ops		= &clkops_null,
-	.parent		= &dpll4_ck,
-	.init		= &omap2_init_clksel_parent,
-	.clksel_reg	= OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3),
-	.clksel_mask	= OMAP3430_DIV_96M_MASK,
-	.clksel		= div16_dpll4_clksel,
-	.clkdm_name	= "dpll4_clkdm",
-	.recalc		= &omap2_clksel_recalc,
-};
-
-static struct clk dpll4_m2_ck_3630 __initdata  = {
+static struct clk dpll4_m2_ck = {
 	.name		= "dpll4_m2_ck",
 	.ops		= &clkops_null,
 	.parent		= &dpll4_ck,
 	.init		= &omap2_init_clksel_parent,
 	.clksel_reg	= OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3),
 	.clksel_mask	= OMAP3630_DIV_96M_MASK,
-	.clksel		= div32_dpll4_clksel,
+	.clksel		= dpll4_clksel,
 	.clkdm_name	= "dpll4_clkdm",
 	.recalc		= &omap2_clksel_recalc,
 };
@@ -698,7 +680,7 @@
 
 static const struct clksel_rate omap_96m_alwon_fck_rates[] = {
 	{ .div = 1, .val = 1, .flags = RATE_IN_36XX },
-	{ .div = 2, .val = 2, .flags = RATE_IN_36XX | DEFAULT_RATE },
+	{ .div = 2, .val = 2, .flags = RATE_IN_36XX },
 	{ .div = 0 }
 };
 
@@ -708,12 +690,12 @@
 };
 
 static const struct clksel_rate omap_96m_dpll_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate omap_96m_sys_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
@@ -760,28 +742,14 @@
 };
 
 /* This virtual clock is the source for dpll4_m3x2_ck */
-static struct clk dpll4_m3_ck;
-
-static struct clk dpll4_m3_ck_34xx __initdata = {
+static struct clk dpll4_m3_ck = {
 	.name		= "dpll4_m3_ck",
 	.ops		= &clkops_null,
 	.parent		= &dpll4_ck,
 	.init		= &omap2_init_clksel_parent,
 	.clksel_reg	= OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
 	.clksel_mask	= OMAP3430_CLKSEL_TV_MASK,
-	.clksel		= div16_dpll4_clksel,
-	.clkdm_name	= "dpll4_clkdm",
-	.recalc		= &omap2_clksel_recalc,
-};
-
-static struct clk dpll4_m3_ck_3630 __initdata = {
-	.name		= "dpll4_m3_ck",
-	.ops		= &clkops_null,
-	.parent		= &dpll4_ck,
-	.init		= &omap2_init_clksel_parent,
-	.clksel_reg	= OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
-	.clksel_mask	= OMAP3630_CLKSEL_TV_MASK,
-	.clksel		= div32_dpll4_clksel,
+	.clksel		= dpll4_clksel,
 	.clkdm_name	= "dpll4_clkdm",
 	.recalc		= &omap2_clksel_recalc,
 };
@@ -799,12 +767,12 @@
 };
 
 static const struct clksel_rate omap_54m_d4m3x2_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate omap_54m_alt_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
@@ -825,12 +793,12 @@
 };
 
 static const struct clksel_rate omap_48m_cm96m_rates[] = {
-	{ .div = 2, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 2, .val = 0, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate omap_48m_alt_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
@@ -858,31 +826,15 @@
 	.recalc		= &omap_fixed_divisor_recalc,
 };
 
-/* This virstual clock is the source for dpll4_m4x2_ck */
-static struct clk dpll4_m4_ck;
-
-static struct clk dpll4_m4_ck_34xx __initdata = {
+/* This virtual clock is the source for dpll4_m4x2_ck */
+static struct clk dpll4_m4_ck = {
 	.name		= "dpll4_m4_ck",
 	.ops		= &clkops_null,
 	.parent		= &dpll4_ck,
 	.init		= &omap2_init_clksel_parent,
 	.clksel_reg	= OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
 	.clksel_mask	= OMAP3430_CLKSEL_DSS1_MASK,
-	.clksel		= div16_dpll4_clksel,
-	.clkdm_name	= "dpll4_clkdm",
-	.recalc		= &omap2_clksel_recalc,
-	.set_rate	= &omap2_clksel_set_rate,
-	.round_rate	= &omap2_clksel_round_rate,
-};
-
-static struct clk dpll4_m4_ck_3630 __initdata = {
-	.name		= "dpll4_m4_ck",
-	.ops		= &clkops_null,
-	.parent		= &dpll4_ck,
-	.init		= &omap2_init_clksel_parent,
-	.clksel_reg	= OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
-	.clksel_mask	= OMAP3630_CLKSEL_DSS1_MASK,
-	.clksel		= div32_dpll4_clksel,
+	.clksel		= dpll4_clksel,
 	.clkdm_name	= "dpll4_clkdm",
 	.recalc		= &omap2_clksel_recalc,
 	.set_rate	= &omap2_clksel_set_rate,
@@ -902,30 +854,14 @@
 };
 
 /* This virtual clock is the source for dpll4_m5x2_ck */
-static struct clk dpll4_m5_ck;
-
-static struct clk dpll4_m5_ck_34xx __initdata = {
+static struct clk dpll4_m5_ck = {
 	.name		= "dpll4_m5_ck",
 	.ops		= &clkops_null,
 	.parent		= &dpll4_ck,
 	.init		= &omap2_init_clksel_parent,
 	.clksel_reg	= OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL),
 	.clksel_mask	= OMAP3430_CLKSEL_CAM_MASK,
-	.clksel		= div16_dpll4_clksel,
-	.clkdm_name	= "dpll4_clkdm",
-	.set_rate	= &omap2_clksel_set_rate,
-	.round_rate	= &omap2_clksel_round_rate,
-	.recalc		= &omap2_clksel_recalc,
-};
-
-static struct clk dpll4_m5_ck_3630 __initdata = {
-	.name		= "dpll4_m5_ck",
-	.ops		= &clkops_null,
-	.parent		= &dpll4_ck,
-	.init		= &omap2_init_clksel_parent,
-	.clksel_reg	= OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL),
-	.clksel_mask	= OMAP3630_CLKSEL_CAM_MASK,
-	.clksel		= div32_dpll4_clksel,
+	.clksel		= dpll4_clksel,
 	.clkdm_name	= "dpll4_clkdm",
 	.set_rate	= &omap2_clksel_set_rate,
 	.round_rate	= &omap2_clksel_round_rate,
@@ -945,28 +881,14 @@
 };
 
 /* This virtual clock is the source for dpll4_m6x2_ck */
-static struct clk dpll4_m6_ck;
-
-static struct clk dpll4_m6_ck_34xx __initdata = {
+static struct clk dpll4_m6_ck = {
 	.name		= "dpll4_m6_ck",
 	.ops		= &clkops_null,
 	.parent		= &dpll4_ck,
 	.init		= &omap2_init_clksel_parent,
 	.clksel_reg	= OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
 	.clksel_mask	= OMAP3430_DIV_DPLL4_MASK,
-	.clksel		= div16_dpll4_clksel,
-	.clkdm_name	= "dpll4_clkdm",
-	.recalc		= &omap2_clksel_recalc,
-};
-
-static struct clk dpll4_m6_ck_3630 __initdata = {
-	.name		= "dpll4_m6_ck",
-	.ops		= &clkops_null,
-	.parent		= &dpll4_ck,
-	.init		= &omap2_init_clksel_parent,
-	.clksel_reg	= OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-	.clksel_mask	= OMAP3630_DIV_DPLL4_MASK,
-	.clksel		= div32_dpll4_clksel,
+	.clksel		= dpll4_clksel,
 	.clkdm_name	= "dpll4_clkdm",
 	.recalc		= &omap2_clksel_recalc,
 };
@@ -1049,22 +971,22 @@
 /* CM EXTERNAL CLOCK OUTPUTS */
 
 static const struct clksel_rate clkout2_src_core_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate clkout2_src_sys_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate clkout2_src_96m_rates[] = {
-	{ .div = 1, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 2, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate clkout2_src_54m_rates[] = {
-	{ .div = 1, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 3, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
@@ -1090,11 +1012,11 @@
 };
 
 static const struct clksel_rate sys_clkout2_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
-	{ .div = 2, .val = 1, .flags = RATE_IN_343X },
-	{ .div = 4, .val = 2, .flags = RATE_IN_343X },
-	{ .div = 8, .val = 3, .flags = RATE_IN_343X },
-	{ .div = 16, .val = 4, .flags = RATE_IN_343X },
+	{ .div = 1, .val = 0, .flags = RATE_IN_3XXX },
+	{ .div = 2, .val = 1, .flags = RATE_IN_3XXX },
+	{ .div = 4, .val = 2, .flags = RATE_IN_3XXX },
+	{ .div = 8, .val = 3, .flags = RATE_IN_3XXX },
+	{ .div = 16, .val = 4, .flags = RATE_IN_3XXX },
 	{ .div = 0 },
 };
 
@@ -1111,6 +1033,8 @@
 	.clksel_mask	= OMAP3430_CLKOUT2_DIV_MASK,
 	.clksel		= sys_clkout2_clksel,
 	.recalc		= &omap2_clksel_recalc,
+	.round_rate	= &omap2_clksel_round_rate,
+	.set_rate	= &omap2_clksel_set_rate
 };
 
 /* CM OUTPUT CLOCKS */
@@ -1125,9 +1049,9 @@
 /* DPLL power domain clock controls */
 
 static const struct clksel_rate div4_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
-	{ .div = 2, .val = 2, .flags = RATE_IN_343X },
-	{ .div = 4, .val = 4, .flags = RATE_IN_343X },
+	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+	{ .div = 4, .val = 4, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
@@ -1161,8 +1085,8 @@
 
 /* arm_fck is divided by two when DPLL1 locked; otherwise, passthrough mpu_ck */
 static const struct clksel_rate arm_fck_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
-	{ .div = 2, .val = 1, .flags = RATE_IN_343X },
+	{ .div = 1, .val = 0, .flags = RATE_IN_3XXX },
+	{ .div = 2, .val = 1, .flags = RATE_IN_3XXX },
 	{ .div = 0 },
 };
 
@@ -1333,25 +1257,25 @@
 
 static const struct clksel_rate sgx_core_rates[] = {
 	{ .div = 2, .val = 5, .flags = RATE_IN_36XX },
-	{ .div = 3, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
-	{ .div = 4, .val = 1, .flags = RATE_IN_343X },
-	{ .div = 6, .val = 2, .flags = RATE_IN_343X },
+	{ .div = 3, .val = 0, .flags = RATE_IN_3XXX },
+	{ .div = 4, .val = 1, .flags = RATE_IN_3XXX },
+	{ .div = 6, .val = 2, .flags = RATE_IN_3XXX },
 	{ .div = 0 },
 };
 
 static const struct clksel_rate sgx_192m_rates[] = {
-	{ .div = 1,  .val = 4, .flags = RATE_IN_36XX | DEFAULT_RATE },
+	{ .div = 1,  .val = 4, .flags = RATE_IN_36XX },
 	{ .div = 0 },
 };
 
 static const struct clksel_rate sgx_corex2_rates[] = {
-	{ .div = 3, .val = 6, .flags = RATE_IN_36XX | DEFAULT_RATE },
+	{ .div = 3, .val = 6, .flags = RATE_IN_36XX },
 	{ .div = 5, .val = 7, .flags = RATE_IN_36XX },
 	{ .div = 0 },
 };
 
 static const struct clksel_rate sgx_96m_rates[] = {
-	{ .div = 1,  .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1,  .val = 3, .flags = RATE_IN_3XXX },
 	{ .div = 0 },
 };
 
@@ -1576,12 +1500,12 @@
  * MCBSP 2, 3, 4 get their 96MHz clock from per_96m_fck.
  */
 static const struct clksel_rate common_mcbsp_96m_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
 static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
@@ -1714,12 +1638,12 @@
 /* DPLL3-derived clock */
 
 static const struct clksel_rate ssi_ssr_corex2_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
-	{ .div = 2, .val = 2, .flags = RATE_IN_343X },
-	{ .div = 3, .val = 3, .flags = RATE_IN_343X },
-	{ .div = 4, .val = 4, .flags = RATE_IN_343X },
-	{ .div = 6, .val = 6, .flags = RATE_IN_343X },
-	{ .div = 8, .val = 8, .flags = RATE_IN_343X },
+	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+	{ .div = 3, .val = 3, .flags = RATE_IN_3XXX },
+	{ .div = 4, .val = 4, .flags = RATE_IN_3XXX },
+	{ .div = 6, .val = 6, .flags = RATE_IN_3XXX },
+	{ .div = 8, .val = 8, .flags = RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
@@ -2353,18 +2277,18 @@
 /* WKUP */
 
 static const struct clksel_rate usim_96m_rates[] = {
-	{ .div = 2,  .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
-	{ .div = 4,  .val = 4, .flags = RATE_IN_343X },
-	{ .div = 8,  .val = 5, .flags = RATE_IN_343X },
-	{ .div = 10, .val = 6, .flags = RATE_IN_343X },
+	{ .div = 2,  .val = 3, .flags = RATE_IN_3XXX },
+	{ .div = 4,  .val = 4, .flags = RATE_IN_3XXX },
+	{ .div = 8,  .val = 5, .flags = RATE_IN_3XXX },
+	{ .div = 10, .val = 6, .flags = RATE_IN_3XXX },
 	{ .div = 0 },
 };
 
 static const struct clksel_rate usim_120m_rates[] = {
-	{ .div = 4,  .val = 7,	.flags = RATE_IN_343X | DEFAULT_RATE },
-	{ .div = 8,  .val = 8,	.flags = RATE_IN_343X },
-	{ .div = 16, .val = 9,	.flags = RATE_IN_343X },
-	{ .div = 20, .val = 10, .flags = RATE_IN_343X },
+	{ .div = 4,  .val = 7,	.flags = RATE_IN_3XXX },
+	{ .div = 8,  .val = 8,	.flags = RATE_IN_3XXX },
+	{ .div = 16, .val = 9,	.flags = RATE_IN_3XXX },
+	{ .div = 20, .val = 10, .flags = RATE_IN_3XXX },
 	{ .div = 0 },
 };
 
@@ -2951,22 +2875,22 @@
 /* More information: ARM Cortex-A8 Technical Reference Manual, sect 10.1 */
 
 static const struct clksel_rate emu_src_sys_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 0, .flags = RATE_IN_3XXX },
 	{ .div = 0 },
 };
 
 static const struct clksel_rate emu_src_core_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
 	{ .div = 0 },
 };
 
 static const struct clksel_rate emu_src_per_rates[] = {
-	{ .div = 1, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 2, .flags = RATE_IN_3XXX },
 	{ .div = 0 },
 };
 
 static const struct clksel_rate emu_src_mpu_rates[] = {
-	{ .div = 1, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
+	{ .div = 1, .val = 3, .flags = RATE_IN_3XXX },
 	{ .div = 0 },
 };
 
@@ -2995,10 +2919,10 @@
 };
 
 static const struct clksel_rate pclk_emu_rates[] = {
-	{ .div = 2, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
-	{ .div = 3, .val = 3, .flags = RATE_IN_343X },
-	{ .div = 4, .val = 4, .flags = RATE_IN_343X },
-	{ .div = 6, .val = 6, .flags = RATE_IN_343X },
+	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+	{ .div = 3, .val = 3, .flags = RATE_IN_3XXX },
+	{ .div = 4, .val = 4, .flags = RATE_IN_3XXX },
+	{ .div = 6, .val = 6, .flags = RATE_IN_3XXX },
 	{ .div = 0 },
 };
 
@@ -3019,9 +2943,9 @@
 };
 
 static const struct clksel_rate pclkx2_emu_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
-	{ .div = 2, .val = 2, .flags = RATE_IN_343X },
-	{ .div = 3, .val = 3, .flags = RATE_IN_343X },
+	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+	{ .div = 3, .val = 3, .flags = RATE_IN_3XXX },
 	{ .div = 0 },
 };
 
@@ -3069,9 +2993,9 @@
 };
 
 static const struct clksel_rate traceclk_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
-	{ .div = 2, .val = 2, .flags = RATE_IN_343X },
-	{ .div = 4, .val = 4, .flags = RATE_IN_343X },
+	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+	{ .div = 4, .val = 4, .flags = RATE_IN_3XXX },
 	{ .div = 0 },
 };
 
@@ -3360,7 +3284,7 @@
 	CLK("mmci-omap-hs.2",	"ick",	&mmchs3_ick,	CK_3430ES2 | CK_AM35XX),
 	CLK(NULL,	"icr_ick",	&icr_ick,	CK_343X),
 	CLK(NULL,	"aes2_ick",	&aes2_ick,	CK_343X),
-	CLK(NULL,	"sha12_ick",	&sha12_ick,	CK_343X),
+	CLK("omap-sham",	"ick",	&sha12_ick,	CK_343X),
 	CLK(NULL,	"des2_ick",	&des2_ick,	CK_343X),
 	CLK("mmci-omap-hs.1",	"ick",	&mmchs2_ick,	CK_3XXX),
 	CLK("mmci-omap-hs.0",	"ick",	&mmchs1_ick,	CK_3XXX),
@@ -3472,8 +3396,8 @@
 	CLK(NULL,	"ipss_ick",	&ipss_ick,	CK_AM35XX),
 	CLK(NULL,	"rmii_ck",	&rmii_ck,	CK_AM35XX),
 	CLK(NULL,	"pclk_ck",	&pclk_ck,	CK_AM35XX),
-	CLK("davinci_emac",	"ick",		&emac_ick,	CK_AM35XX),
-	CLK("davinci_emac",	"fck",		&emac_fck,	CK_AM35XX),
+	CLK("davinci_emac",	"emac_clk",	&emac_ick,	CK_AM35XX),
+	CLK("davinci_emac",	"phy_clk",	&emac_fck,	CK_AM35XX),
 	CLK("vpfe-capture",	"master",	&vpfe_ick,	CK_AM35XX),
 	CLK("vpfe-capture",	"slave",	&vpfe_fck,	CK_AM35XX),
 	CLK("musb_hdrc",	"ick",		&hsotgusb_ick_am35xx,	CK_AM35XX),
@@ -3488,14 +3412,8 @@
 	struct omap_clk *c;
 	u32 cpu_clkflg = CK_3XXX;
 
-	if (cpu_is_omap3517()) {
-		cpu_mask = RATE_IN_343X | RATE_IN_3430ES2;
-		cpu_clkflg |= CK_3517;
-	} else if (cpu_is_omap3505()) {
-		cpu_mask = RATE_IN_343X | RATE_IN_3430ES2;
-		cpu_clkflg |= CK_3505;
-	} else if (cpu_is_omap34xx()) {
-		cpu_mask = RATE_IN_343X;
+	if (cpu_is_omap34xx()) {
+		cpu_mask = RATE_IN_3XXX;
 		cpu_clkflg |= CK_343X;
 
 		/*
@@ -3506,10 +3424,17 @@
 			/* No 3430ES1-only rates exist, so no RATE_IN_3430ES1 */
 			cpu_clkflg |= CK_3430ES1;
 		} else {
-			cpu_mask |= RATE_IN_3430ES2;
+			cpu_mask |= RATE_IN_3430ES2PLUS;
 			cpu_clkflg |= CK_3430ES2;
 		}
+	} else if (cpu_is_omap3517()) {
+		cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
+		cpu_clkflg |= CK_3517;
+	} else if (cpu_is_omap3505()) {
+		cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
+		cpu_clkflg |= CK_3505;
 	}
+
 	if (omap3_has_192mhz_clk())
 		omap_96m_alwon_fck = omap_96m_alwon_fck_3630;
 
@@ -3520,14 +3445,7 @@
 		/*
 		 * XXX This type of dynamic rewriting of the clock tree is
 		 * deprecated and should be revised soon.
-		 */
-		dpll4_m2_ck = dpll4_m2_ck_3630;
-		dpll4_m3_ck = dpll4_m3_ck_3630;
-		dpll4_m4_ck = dpll4_m4_ck_3630;
-		dpll4_m5_ck = dpll4_m5_ck_3630;
-		dpll4_m6_ck = dpll4_m6_ck_3630;
-
-		/*
+		 *
 		 * For 3630: override clkops_omap2_dflt_wait for the
 		 * clocks affected from PWRDN reset Limitation
 		 */
@@ -3543,18 +3461,12 @@
 				&clkops_omap36xx_pwrdn_with_hsdiv_wait_restore;
 		dpll4_m6x2_ck.ops =
 				&clkops_omap36xx_pwrdn_with_hsdiv_wait_restore;
-	} else {
-		/*
-		 * XXX This type of dynamic rewriting of the clock tree is
-		 * deprecated and should be revised soon.
-		 */
-		dpll4_m2_ck = dpll4_m2_ck_34xx;
-		dpll4_m3_ck = dpll4_m3_ck_34xx;
-		dpll4_m4_ck = dpll4_m4_ck_34xx;
-		dpll4_m5_ck = dpll4_m5_ck_34xx;
-		dpll4_m6_ck = dpll4_m6_ck_34xx;
 	}
 
+	/*
+	 * XXX This type of dynamic rewriting of the clock tree is
+	 * deprecated and should be revised soon.
+	 */
 	if (cpu_is_omap3630())
 		dpll4_dd = dpll4_dd_3630;
 	else
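
The hunks above collapse the per-revision rate tables: entries valid on every OMAP3 now carry RATE_IN_3XXX, ES2-and-later entries carry RATE_IN_3430ES2PLUS, and the separate DEFAULT_RATE marker disappears, while omap3xxx_clk_init() builds a cpu_mask from the detected chip. A minimal, self-contained sketch of how such a table can be filtered against that mask follows; the struct layout, flag values and helper are illustrative stand-ins, not the kernel's own definitions.

/*
 * Illustrative sketch only: filter a clksel_rate-style table against a
 * cpu_mask built for the running chip.  Flag values are made up.
 */
#include <stdio.h>

#define RATE_IN_3XXX		(1 << 0)	/* valid on all OMAP3 */
#define RATE_IN_3430ES2PLUS	(1 << 1)	/* 3430 ES2 and later only */
#define RATE_IN_36XX		(1 << 2)	/* 36xx-only rates */

struct clksel_rate {
	unsigned int div;	/* divisor; 0 terminates the table */
	unsigned int val;	/* register field value for this divisor */
	unsigned int flags;	/* RATE_IN_* validity mask */
};

static const struct clksel_rate example_rates[] = {
	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
	{ .div = 3, .val = 3, .flags = RATE_IN_3430ES2PLUS },
	{ .div = 0 }
};

int main(void)
{
	/* e.g. a 3430 ES1: only the RATE_IN_3XXX entries are usable */
	unsigned int cpu_mask = RATE_IN_3XXX;
	const struct clksel_rate *r;

	for (r = example_rates; r->div; r++)
		if (r->flags & cpu_mask)
			printf("divisor %u (field value %u) is valid\n",
			       r->div, r->val);
	return 0;
}

With every remaining entry tagged for the chips it applies to, the first matching entry can act as the default, which is presumably why a separate DEFAULT_RATE flag is no longer carried.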
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index a5c0c9c..0280422 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -2675,6 +2675,11 @@
 	CLK("omap2_mcspi.2",	"ick",			&dummy_ck,	CK_443X),
 	CLK("omap2_mcspi.3",	"ick",			&dummy_ck,	CK_443X),
 	CLK("omap2_mcspi.4",	"ick",			&dummy_ck,	CK_443X),
+	CLK("mmci-omap-hs.0",	"ick",		&dummy_ck,	CK_443X),
+	CLK("mmci-omap-hs.1",	"ick",		&dummy_ck,	CK_443X),
+	CLK("mmci-omap-hs.2",	"ick",		&dummy_ck,	CK_443X),
+	CLK("mmci-omap-hs.3",	"ick",		&dummy_ck,	CK_443X),
+	CLK("mmci-omap-hs.4",	"ick",		&dummy_ck,	CK_443X),
 	CLK(NULL,	"uart1_ick",			&dummy_ck,	CK_443X),
 	CLK(NULL,	"uart2_ick",			&dummy_ck,	CK_443X),
 	CLK(NULL,	"uart3_ick",			&dummy_ck,	CK_443X),
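
The clock-table changes above (sha12_ick published under the "omap-sham" device, the davinci_emac clocks renamed to "emac_clk"/"phy_clk", and dummy "ick" entries added for the OMAP4 MMC hosts) all exist so that drivers can look clocks up by device name and connection id. Below is a self-contained sketch of that kind of lookup, with an illustrative table and a simplified clk_lookup() helper standing in for the kernel's clk_get():

/*
 * Illustrative sketch only: (device name, connection id) clock lookup.
 * The table contents mirror two entries from the diff above but the
 * structures and helper are simplified stand-ins.
 */
#include <stdio.h>
#include <string.h>

struct clk {
	const char *name;
};

struct omap_clk {
	const char *dev_id;	/* device name */
	const char *con_id;	/* connection id, e.g. "ick" or "fck" */
	struct clk *clk;
};

static struct clk sha12_ick = { .name = "sha12_ick" };
static struct clk dummy_ck  = { .name = "dummy_ck" };

static struct omap_clk clocks[] = {
	{ "omap-sham",      "ick", &sha12_ick },
	{ "mmci-omap-hs.0", "ick", &dummy_ck  },
};

static struct clk *clk_lookup(const char *dev_id, const char *con_id)
{
	size_t i;

	for (i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++)
		if (!strcmp(clocks[i].dev_id, dev_id) &&
		    !strcmp(clocks[i].con_id, con_id))
			return clocks[i].clk;
	return NULL;
}

int main(void)
{
	struct clk *clk = clk_lookup("omap-sham", "ick");

	printf("omap-sham ick -> %s\n", clk ? clk->name : "not found");
	return 0;
}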
diff --git a/arch/arm/mach-omap2/clock_common_data.c b/arch/arm/mach-omap2/clock_common_data.c
index f69096b..1cf8131 100644
--- a/arch/arm/mach-omap2/clock_common_data.c
+++ b/arch/arm/mach-omap2/clock_common_data.c
@@ -20,20 +20,20 @@
 
 /* clksel_rate data common to 24xx/343x */
 const struct clksel_rate gpt_32k_rates[] = {
-	 { .div = 1, .val = 0, .flags = RATE_IN_24XX | RATE_IN_343X | DEFAULT_RATE },
+	 { .div = 1, .val = 0, .flags = RATE_IN_24XX | RATE_IN_3XXX },
 	 { .div = 0 }
 };
 
 const struct clksel_rate gpt_sys_rates[] = {
-	 { .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_343X | DEFAULT_RATE },
+	 { .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_3XXX },
 	 { .div = 0 }
 };
 
 const struct clksel_rate gfx_l3_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_343X },
-	{ .div = 2, .val = 2, .flags = RATE_IN_24XX | RATE_IN_343X | DEFAULT_RATE },
-	{ .div = 3, .val = 3, .flags = RATE_IN_243X | RATE_IN_343X },
-	{ .div = 4, .val = 4, .flags = RATE_IN_243X | RATE_IN_343X },
+	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_3XXX },
+	{ .div = 2, .val = 2, .flags = RATE_IN_24XX | RATE_IN_3XXX },
+	{ .div = 3, .val = 3, .flags = RATE_IN_243X | RATE_IN_3XXX },
+	{ .div = 4, .val = 4, .flags = RATE_IN_243X | RATE_IN_3XXX },
 	{ .div = 0 }
 };
 
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index 6e568ec..5d80cb8 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -809,7 +809,7 @@
 
 	if (cpu_is_omap24xx()) {
 
-		cm_set_mod_reg_bits(OMAP24XX_FORCESTATE,
+		cm_set_mod_reg_bits(OMAP24XX_FORCESTATE_MASK,
 			    clkdm->pwrdm.ptr->prcm_offs, OMAP2_PM_PWSTCTRL);
 
 	} else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
@@ -853,7 +853,7 @@
 
 	if (cpu_is_omap24xx()) {
 
-		cm_clear_mod_reg_bits(OMAP24XX_FORCESTATE,
+		cm_clear_mod_reg_bits(OMAP24XX_FORCESTATE_MASK,
 			      clkdm->pwrdm.ptr->prcm_offs, OMAP2_PM_PWSTCTRL);
 
 	} else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
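
The OMAP24XX_FORCESTATE to OMAP24XX_FORCESTATE_MASK rename above only changes the macro name; the helpers still OR the mask into, or clear it from, the power-state control register. A self-contained sketch of that read-modify-write pattern follows, with the register simulated by a plain variable and the bit position chosen only for illustration:

/*
 * Illustrative sketch only: the set/clear pattern behind
 * cm_set_mod_reg_bits()/cm_clear_mod_reg_bits().  The register is a
 * plain variable rather than a memory-mapped I/O location.
 */
#include <stdio.h>

#define FORCESTATE_MASK		(1 << 18)	/* illustrative bit position */

static unsigned int pwstctrl;	/* stands in for a PM_PWSTCTRL register */

static void set_mod_reg_bits(unsigned int bits, unsigned int *reg)
{
	*reg |= bits;			/* read, OR in the mask, write back */
}

static void clear_mod_reg_bits(unsigned int bits, unsigned int *reg)
{
	*reg &= ~bits;			/* read, clear the mask, write back */
}

int main(void)
{
	set_mod_reg_bits(FORCESTATE_MASK, &pwstctrl);
	printf("after set:   0x%08x\n", pwstctrl);
	clear_mod_reg_bits(FORCESTATE_MASK, &pwstctrl);
	printf("after clear: 0x%08x\n", pwstctrl);
	return 0;
}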
diff --git a/arch/arm/mach-omap2/clockdomains44xx.h b/arch/arm/mach-omap2/clockdomains44xx.h
index 438aaee..7e5ba0f 100644
--- a/arch/arm/mach-omap2/clockdomains44xx.h
+++ b/arch/arm/mach-omap2/clockdomains44xx.h
@@ -131,7 +131,7 @@
 static struct clockdomain mpu0_44xx_clkdm = {
 	.name		  = "mpu0_clkdm",
 	.pwrdm		  = { .name = "cpu0_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_PDA_CPU0_CLKSTCTRL,
+	.clkstctrl_reg	  = OMAP4430_CM_CPU0_CLKSTCTRL,
 	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
 	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
 	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
@@ -140,7 +140,7 @@
 static struct clockdomain mpu1_44xx_clkdm = {
 	.name		  = "mpu1_clkdm",
 	.pwrdm		  = { .name = "cpu1_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_PDA_CPU1_CLKSTCTRL,
+	.clkstctrl_reg	  = OMAP4430_CM_CPU1_CLKSTCTRL,
 	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
 	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
 	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
diff --git a/arch/arm/mach-omap2/cm-regbits-24xx.h b/arch/arm/mach-omap2/cm-regbits-24xx.h
index 297a2fe..da51cc3 100644
--- a/arch/arm/mach-omap2/cm-regbits-24xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-24xx.h
@@ -20,43 +20,43 @@
 
 /* CM_FCLKEN1_CORE and CM_ICLKEN1_CORE shared bits */
 #define OMAP24XX_EN_CAM_SHIFT				31
-#define OMAP24XX_EN_CAM					(1 << 31)
+#define OMAP24XX_EN_CAM_MASK				(1 << 31)
 #define OMAP24XX_EN_WDT4_SHIFT				29
-#define OMAP24XX_EN_WDT4				(1 << 29)
+#define OMAP24XX_EN_WDT4_MASK				(1 << 29)
 #define OMAP2420_EN_WDT3_SHIFT				28
-#define OMAP2420_EN_WDT3				(1 << 28)
+#define OMAP2420_EN_WDT3_MASK				(1 << 28)
 #define OMAP24XX_EN_MSPRO_SHIFT				27
-#define OMAP24XX_EN_MSPRO				(1 << 27)
+#define OMAP24XX_EN_MSPRO_MASK				(1 << 27)
 #define OMAP24XX_EN_FAC_SHIFT				25
-#define OMAP24XX_EN_FAC					(1 << 25)
+#define OMAP24XX_EN_FAC_MASK				(1 << 25)
 #define OMAP2420_EN_EAC_SHIFT				24
-#define OMAP2420_EN_EAC					(1 << 24)
+#define OMAP2420_EN_EAC_MASK				(1 << 24)
 #define OMAP24XX_EN_HDQ_SHIFT				23
-#define OMAP24XX_EN_HDQ					(1 << 23)
+#define OMAP24XX_EN_HDQ_MASK				(1 << 23)
 #define OMAP2420_EN_I2C2_SHIFT				20
-#define OMAP2420_EN_I2C2				(1 << 20)
+#define OMAP2420_EN_I2C2_MASK				(1 << 20)
 #define OMAP2420_EN_I2C1_SHIFT				19
-#define OMAP2420_EN_I2C1				(1 << 19)
+#define OMAP2420_EN_I2C1_MASK				(1 << 19)
 
 /* CM_FCLKEN2_CORE and CM_ICLKEN2_CORE shared bits */
 #define OMAP2430_EN_MCBSP5_SHIFT			5
-#define OMAP2430_EN_MCBSP5				(1 << 5)
+#define OMAP2430_EN_MCBSP5_MASK				(1 << 5)
 #define OMAP2430_EN_MCBSP4_SHIFT			4
-#define OMAP2430_EN_MCBSP4				(1 << 4)
+#define OMAP2430_EN_MCBSP4_MASK				(1 << 4)
 #define OMAP2430_EN_MCBSP3_SHIFT			3
-#define OMAP2430_EN_MCBSP3				(1 << 3)
+#define OMAP2430_EN_MCBSP3_MASK				(1 << 3)
 #define OMAP24XX_EN_SSI_SHIFT				1
-#define OMAP24XX_EN_SSI					(1 << 1)
+#define OMAP24XX_EN_SSI_MASK				(1 << 1)
 
 /* CM_FCLKEN_WKUP and CM_ICLKEN_WKUP shared bits */
 #define OMAP24XX_EN_MPU_WDT_SHIFT			3
-#define OMAP24XX_EN_MPU_WDT				(1 << 3)
+#define OMAP24XX_EN_MPU_WDT_MASK			(1 << 3)
 
 /* Bits specific to each register */
 
 /* CM_IDLEST_MPU */
 /* 2430 only */
-#define OMAP2430_ST_MPU					(1 << 0)
+#define OMAP2430_ST_MPU_MASK				(1 << 0)
 
 /* CM_CLKSEL_MPU */
 #define OMAP24XX_CLKSEL_MPU_SHIFT			0
@@ -68,46 +68,46 @@
 
 /* CM_FCLKEN1_CORE specific bits*/
 #define OMAP24XX_EN_TV_SHIFT				2
-#define OMAP24XX_EN_TV					(1 << 2)
+#define OMAP24XX_EN_TV_MASK				(1 << 2)
 #define OMAP24XX_EN_DSS2_SHIFT				1
-#define OMAP24XX_EN_DSS2				(1 << 1)
+#define OMAP24XX_EN_DSS2_MASK				(1 << 1)
 #define OMAP24XX_EN_DSS1_SHIFT				0
-#define OMAP24XX_EN_DSS1				(1 << 0)
+#define OMAP24XX_EN_DSS1_MASK				(1 << 0)
 
 /* CM_FCLKEN2_CORE specific bits */
 #define OMAP2430_EN_I2CHS2_SHIFT			20
-#define OMAP2430_EN_I2CHS2				(1 << 20)
+#define OMAP2430_EN_I2CHS2_MASK				(1 << 20)
 #define OMAP2430_EN_I2CHS1_SHIFT			19
-#define OMAP2430_EN_I2CHS1				(1 << 19)
+#define OMAP2430_EN_I2CHS1_MASK				(1 << 19)
 #define OMAP2430_EN_MMCHSDB2_SHIFT			17
-#define OMAP2430_EN_MMCHSDB2				(1 << 17)
+#define OMAP2430_EN_MMCHSDB2_MASK			(1 << 17)
 #define OMAP2430_EN_MMCHSDB1_SHIFT			16
-#define OMAP2430_EN_MMCHSDB1				(1 << 16)
+#define OMAP2430_EN_MMCHSDB1_MASK			(1 << 16)
 
 /* CM_ICLKEN1_CORE specific bits */
 #define OMAP24XX_EN_MAILBOXES_SHIFT			30
-#define OMAP24XX_EN_MAILBOXES				(1 << 30)
+#define OMAP24XX_EN_MAILBOXES_MASK			(1 << 30)
 #define OMAP24XX_EN_DSS_SHIFT				0
-#define OMAP24XX_EN_DSS					(1 << 0)
+#define OMAP24XX_EN_DSS_MASK				(1 << 0)
 
 /* CM_ICLKEN2_CORE specific bits */
 
 /* CM_ICLKEN3_CORE */
 /* 2430 only */
 #define OMAP2430_EN_SDRC_SHIFT				2
-#define OMAP2430_EN_SDRC				(1 << 2)
+#define OMAP2430_EN_SDRC_MASK				(1 << 2)
 
 /* CM_ICLKEN4_CORE */
 #define OMAP24XX_EN_PKA_SHIFT				4
-#define OMAP24XX_EN_PKA					(1 << 4)
+#define OMAP24XX_EN_PKA_MASK				(1 << 4)
 #define OMAP24XX_EN_AES_SHIFT				3
-#define OMAP24XX_EN_AES					(1 << 3)
+#define OMAP24XX_EN_AES_MASK				(1 << 3)
 #define OMAP24XX_EN_RNG_SHIFT				2
-#define OMAP24XX_EN_RNG					(1 << 2)
+#define OMAP24XX_EN_RNG_MASK				(1 << 2)
 #define OMAP24XX_EN_SHA_SHIFT				1
-#define OMAP24XX_EN_SHA					(1 << 1)
+#define OMAP24XX_EN_SHA_MASK				(1 << 1)
 #define OMAP24XX_EN_DES_SHIFT				0
-#define OMAP24XX_EN_DES					(1 << 0)
+#define OMAP24XX_EN_DES_MASK				(1 << 0)
 
 /* CM_IDLEST1_CORE specific bits */
 #define OMAP24XX_ST_MAILBOXES_SHIFT			30
@@ -138,9 +138,9 @@
 /* CM_IDLEST2_CORE */
 #define OMAP2430_ST_MCBSP5_SHIFT			5
 #define OMAP2430_ST_MCBSP5_MASK				(1 << 5)
-#define OMAP2430_ST_MCBSP4_SHIFT				4
+#define OMAP2430_ST_MCBSP4_SHIFT			4
 #define OMAP2430_ST_MCBSP4_MASK				(1 << 4)
-#define OMAP2430_ST_MCBSP3_SHIFT				3
+#define OMAP2430_ST_MCBSP3_SHIFT			3
 #define OMAP2430_ST_MCBSP3_MASK				(1 << 3)
 #define OMAP24XX_ST_SSI_SHIFT				1
 #define OMAP24XX_ST_SSI_MASK				(1 << 1)
@@ -162,62 +162,62 @@
 #define OMAP24XX_ST_DES_MASK				(1 << 0)
 
 /* CM_AUTOIDLE1_CORE */
-#define OMAP24XX_AUTO_CAM				(1 << 31)
-#define OMAP24XX_AUTO_MAILBOXES				(1 << 30)
-#define OMAP24XX_AUTO_WDT4				(1 << 29)
-#define OMAP2420_AUTO_WDT3				(1 << 28)
-#define OMAP24XX_AUTO_MSPRO				(1 << 27)
-#define OMAP2420_AUTO_MMC				(1 << 26)
-#define OMAP24XX_AUTO_FAC				(1 << 25)
-#define OMAP2420_AUTO_EAC				(1 << 24)
-#define OMAP24XX_AUTO_HDQ				(1 << 23)
-#define OMAP24XX_AUTO_UART2				(1 << 22)
-#define OMAP24XX_AUTO_UART1				(1 << 21)
-#define OMAP24XX_AUTO_I2C2				(1 << 20)
-#define OMAP24XX_AUTO_I2C1				(1 << 19)
-#define OMAP24XX_AUTO_MCSPI2				(1 << 18)
-#define OMAP24XX_AUTO_MCSPI1				(1 << 17)
-#define OMAP24XX_AUTO_MCBSP2				(1 << 16)
-#define OMAP24XX_AUTO_MCBSP1				(1 << 15)
-#define OMAP24XX_AUTO_GPT12				(1 << 14)
-#define OMAP24XX_AUTO_GPT11				(1 << 13)
-#define OMAP24XX_AUTO_GPT10				(1 << 12)
-#define OMAP24XX_AUTO_GPT9				(1 << 11)
-#define OMAP24XX_AUTO_GPT8				(1 << 10)
-#define OMAP24XX_AUTO_GPT7				(1 << 9)
-#define OMAP24XX_AUTO_GPT6				(1 << 8)
-#define OMAP24XX_AUTO_GPT5				(1 << 7)
-#define OMAP24XX_AUTO_GPT4				(1 << 6)
-#define OMAP24XX_AUTO_GPT3				(1 << 5)
-#define OMAP24XX_AUTO_GPT2				(1 << 4)
-#define OMAP2420_AUTO_VLYNQ				(1 << 3)
-#define OMAP24XX_AUTO_DSS				(1 << 0)
+#define OMAP24XX_AUTO_CAM_MASK				(1 << 31)
+#define OMAP24XX_AUTO_MAILBOXES_MASK			(1 << 30)
+#define OMAP24XX_AUTO_WDT4_MASK				(1 << 29)
+#define OMAP2420_AUTO_WDT3_MASK				(1 << 28)
+#define OMAP24XX_AUTO_MSPRO_MASK			(1 << 27)
+#define OMAP2420_AUTO_MMC_MASK				(1 << 26)
+#define OMAP24XX_AUTO_FAC_MASK				(1 << 25)
+#define OMAP2420_AUTO_EAC_MASK				(1 << 24)
+#define OMAP24XX_AUTO_HDQ_MASK				(1 << 23)
+#define OMAP24XX_AUTO_UART2_MASK			(1 << 22)
+#define OMAP24XX_AUTO_UART1_MASK			(1 << 21)
+#define OMAP24XX_AUTO_I2C2_MASK				(1 << 20)
+#define OMAP24XX_AUTO_I2C1_MASK				(1 << 19)
+#define OMAP24XX_AUTO_MCSPI2_MASK			(1 << 18)
+#define OMAP24XX_AUTO_MCSPI1_MASK			(1 << 17)
+#define OMAP24XX_AUTO_MCBSP2_MASK			(1 << 16)
+#define OMAP24XX_AUTO_MCBSP1_MASK			(1 << 15)
+#define OMAP24XX_AUTO_GPT12_MASK			(1 << 14)
+#define OMAP24XX_AUTO_GPT11_MASK			(1 << 13)
+#define OMAP24XX_AUTO_GPT10_MASK			(1 << 12)
+#define OMAP24XX_AUTO_GPT9_MASK				(1 << 11)
+#define OMAP24XX_AUTO_GPT8_MASK				(1 << 10)
+#define OMAP24XX_AUTO_GPT7_MASK				(1 << 9)
+#define OMAP24XX_AUTO_GPT6_MASK				(1 << 8)
+#define OMAP24XX_AUTO_GPT5_MASK				(1 << 7)
+#define OMAP24XX_AUTO_GPT4_MASK				(1 << 6)
+#define OMAP24XX_AUTO_GPT3_MASK				(1 << 5)
+#define OMAP24XX_AUTO_GPT2_MASK				(1 << 4)
+#define OMAP2420_AUTO_VLYNQ_MASK			(1 << 3)
+#define OMAP24XX_AUTO_DSS_MASK				(1 << 0)
 
 /* CM_AUTOIDLE2_CORE */
-#define OMAP2430_AUTO_MDM_INTC				(1 << 11)
-#define OMAP2430_AUTO_GPIO5				(1 << 10)
-#define OMAP2430_AUTO_MCSPI3				(1 << 9)
-#define OMAP2430_AUTO_MMCHS2				(1 << 8)
-#define OMAP2430_AUTO_MMCHS1				(1 << 7)
-#define OMAP2430_AUTO_USBHS				(1 << 6)
-#define OMAP2430_AUTO_MCBSP5				(1 << 5)
-#define OMAP2430_AUTO_MCBSP4				(1 << 4)
-#define OMAP2430_AUTO_MCBSP3				(1 << 3)
-#define OMAP24XX_AUTO_UART3				(1 << 2)
-#define OMAP24XX_AUTO_SSI				(1 << 1)
-#define OMAP24XX_AUTO_USB				(1 << 0)
+#define OMAP2430_AUTO_MDM_INTC_MASK			(1 << 11)
+#define OMAP2430_AUTO_GPIO5_MASK			(1 << 10)
+#define OMAP2430_AUTO_MCSPI3_MASK			(1 << 9)
+#define OMAP2430_AUTO_MMCHS2_MASK			(1 << 8)
+#define OMAP2430_AUTO_MMCHS1_MASK			(1 << 7)
+#define OMAP2430_AUTO_USBHS_MASK			(1 << 6)
+#define OMAP2430_AUTO_MCBSP5_MASK			(1 << 5)
+#define OMAP2430_AUTO_MCBSP4_MASK			(1 << 4)
+#define OMAP2430_AUTO_MCBSP3_MASK			(1 << 3)
+#define OMAP24XX_AUTO_UART3_MASK			(1 << 2)
+#define OMAP24XX_AUTO_SSI_MASK				(1 << 1)
+#define OMAP24XX_AUTO_USB_MASK				(1 << 0)
 
 /* CM_AUTOIDLE3_CORE */
-#define OMAP24XX_AUTO_SDRC				(1 << 2)
-#define OMAP24XX_AUTO_GPMC				(1 << 1)
-#define OMAP24XX_AUTO_SDMA				(1 << 0)
+#define OMAP24XX_AUTO_SDRC_MASK				(1 << 2)
+#define OMAP24XX_AUTO_GPMC_MASK				(1 << 1)
+#define OMAP24XX_AUTO_SDMA_MASK				(1 << 0)
 
 /* CM_AUTOIDLE4_CORE */
-#define OMAP24XX_AUTO_PKA				(1 << 4)
-#define OMAP24XX_AUTO_AES				(1 << 3)
-#define OMAP24XX_AUTO_RNG				(1 << 2)
-#define OMAP24XX_AUTO_SHA				(1 << 1)
-#define OMAP24XX_AUTO_DES				(1 << 0)
+#define OMAP24XX_AUTO_PKA_MASK				(1 << 4)
+#define OMAP24XX_AUTO_AES_MASK				(1 << 3)
+#define OMAP24XX_AUTO_RNG_MASK				(1 << 2)
+#define OMAP24XX_AUTO_SHA_MASK				(1 << 1)
+#define OMAP24XX_AUTO_DES_MASK				(1 << 0)
 
 /* CM_CLKSEL1_CORE */
 #define OMAP24XX_CLKSEL_USB_SHIFT			25
@@ -269,9 +269,9 @@
 
 /* CM_FCLKEN_GFX */
 #define OMAP24XX_EN_3D_SHIFT				2
-#define OMAP24XX_EN_3D					(1 << 2)
+#define OMAP24XX_EN_3D_MASK				(1 << 2)
 #define OMAP24XX_EN_2D_SHIFT				1
-#define OMAP24XX_EN_2D					(1 << 1)
+#define OMAP24XX_EN_2D_MASK				(1 << 1)
 
 /* CM_ICLKEN_GFX specific bits */
 
@@ -287,13 +287,13 @@
 
 /* CM_ICLKEN_WKUP specific bits */
 #define OMAP2430_EN_ICR_SHIFT				6
-#define OMAP2430_EN_ICR					(1 << 6)
+#define OMAP2430_EN_ICR_MASK				(1 << 6)
 #define OMAP24XX_EN_OMAPCTRL_SHIFT			5
-#define OMAP24XX_EN_OMAPCTRL				(1 << 5)
+#define OMAP24XX_EN_OMAPCTRL_MASK			(1 << 5)
 #define OMAP24XX_EN_WDT1_SHIFT				4
-#define OMAP24XX_EN_WDT1				(1 << 4)
+#define OMAP24XX_EN_WDT1_MASK				(1 << 4)
 #define OMAP24XX_EN_32KSYNC_SHIFT			1
-#define OMAP24XX_EN_32KSYNC				(1 << 1)
+#define OMAP24XX_EN_32KSYNC_MASK			(1 << 1)
 
 /* CM_IDLEST_WKUP specific bits */
 #define OMAP2430_ST_ICR_SHIFT				6
@@ -308,12 +308,12 @@
 #define OMAP24XX_ST_32KSYNC_MASK			(1 << 1)
 
 /* CM_AUTOIDLE_WKUP */
-#define OMAP24XX_AUTO_OMAPCTRL				(1 << 5)
-#define OMAP24XX_AUTO_WDT1				(1 << 4)
-#define OMAP24XX_AUTO_MPU_WDT				(1 << 3)
-#define OMAP24XX_AUTO_GPIOS				(1 << 2)
-#define OMAP24XX_AUTO_32KSYNC				(1 << 1)
-#define OMAP24XX_AUTO_GPT1				(1 << 0)
+#define OMAP24XX_AUTO_OMAPCTRL_MASK			(1 << 5)
+#define OMAP24XX_AUTO_WDT1_MASK				(1 << 4)
+#define OMAP24XX_AUTO_MPU_WDT_MASK			(1 << 3)
+#define OMAP24XX_AUTO_GPIOS_MASK			(1 << 2)
+#define OMAP24XX_AUTO_32KSYNC_MASK			(1 << 1)
+#define OMAP24XX_AUTO_GPT1_MASK				(1 << 0)
 
 /* CM_CLKSEL_WKUP */
 #define OMAP24XX_CLKSEL_GPT1_SHIFT			0
@@ -328,12 +328,12 @@
 #define OMAP24XX_EN_DPLL_MASK				(0x3 << 0)
 
 /* CM_IDLEST_CKGEN */
-#define OMAP24XX_ST_54M_APLL				(1 << 9)
-#define OMAP24XX_ST_96M_APLL				(1 << 8)
-#define OMAP24XX_ST_54M_CLK				(1 << 6)
-#define OMAP24XX_ST_12M_CLK				(1 << 5)
-#define OMAP24XX_ST_48M_CLK				(1 << 4)
-#define OMAP24XX_ST_96M_CLK				(1 << 2)
+#define OMAP24XX_ST_54M_APLL_MASK			(1 << 9)
+#define OMAP24XX_ST_96M_APLL_MASK			(1 << 8)
+#define OMAP24XX_ST_54M_CLK_MASK			(1 << 6)
+#define OMAP24XX_ST_12M_CLK_MASK			(1 << 5)
+#define OMAP24XX_ST_48M_CLK_MASK			(1 << 4)
+#define OMAP24XX_ST_96M_CLK_MASK			(1 << 2)
 #define OMAP24XX_ST_CORE_CLK_SHIFT			0
 #define OMAP24XX_ST_CORE_CLK_MASK			(0x3 << 0)
 
@@ -355,11 +355,11 @@
 #define OMAP24XX_DPLL_DIV_SHIFT				8
 #define OMAP24XX_DPLL_DIV_MASK				(0xf << 8)
 #define OMAP24XX_54M_SOURCE_SHIFT			5
-#define OMAP24XX_54M_SOURCE				(1 << 5)
+#define OMAP24XX_54M_SOURCE_MASK			(1 << 5)
 #define OMAP2430_96M_SOURCE_SHIFT			4
-#define OMAP2430_96M_SOURCE				(1 << 4)
+#define OMAP2430_96M_SOURCE_MASK			(1 << 4)
 #define OMAP24XX_48M_SOURCE_SHIFT			3
-#define OMAP24XX_48M_SOURCE				(1 << 3)
+#define OMAP24XX_48M_SOURCE_MASK			(1 << 3)
 #define OMAP2430_ALTCLK_SOURCE_SHIFT			0
 #define OMAP2430_ALTCLK_SOURCE_MASK			(0x7 << 0)
 
@@ -369,29 +369,29 @@
 
 /* CM_FCLKEN_DSP */
 #define OMAP2420_EN_IVA_COP_SHIFT			10
-#define OMAP2420_EN_IVA_COP				(1 << 10)
+#define OMAP2420_EN_IVA_COP_MASK			(1 << 10)
 #define OMAP2420_EN_IVA_MPU_SHIFT			8
-#define OMAP2420_EN_IVA_MPU				(1 << 8)
+#define OMAP2420_EN_IVA_MPU_MASK			(1 << 8)
 #define OMAP24XX_CM_FCLKEN_DSP_EN_DSP_SHIFT		0
-#define OMAP24XX_CM_FCLKEN_DSP_EN_DSP			(1 << 0)
+#define OMAP24XX_CM_FCLKEN_DSP_EN_DSP_MASK		(1 << 0)
 
 /* CM_ICLKEN_DSP */
 #define OMAP2420_EN_DSP_IPI_SHIFT			1
-#define OMAP2420_EN_DSP_IPI				(1 << 1)
+#define OMAP2420_EN_DSP_IPI_MASK			(1 << 1)
 
 /* CM_IDLEST_DSP */
-#define OMAP2420_ST_IVA					(1 << 8)
-#define OMAP2420_ST_IPI					(1 << 1)
-#define OMAP24XX_ST_DSP					(1 << 0)
+#define OMAP2420_ST_IVA_MASK				(1 << 8)
+#define OMAP2420_ST_IPI_MASK				(1 << 1)
+#define OMAP24XX_ST_DSP_MASK				(1 << 0)
 
 /* CM_AUTOIDLE_DSP */
-#define OMAP2420_AUTO_DSP_IPI				(1 << 1)
+#define OMAP2420_AUTO_DSP_IPI_MASK			(1 << 1)
 
 /* CM_CLKSEL_DSP */
-#define OMAP2420_SYNC_IVA				(1 << 13)
+#define OMAP2420_SYNC_IVA_MASK				(1 << 13)
 #define OMAP2420_CLKSEL_IVA_SHIFT			8
 #define OMAP2420_CLKSEL_IVA_MASK			(0x1f << 8)
-#define OMAP24XX_SYNC_DSP				(1 << 7)
+#define OMAP24XX_SYNC_DSP_MASK				(1 << 7)
 #define OMAP24XX_CLKSEL_DSP_IF_SHIFT			5
 #define OMAP24XX_CLKSEL_DSP_IF_MASK			(0x3 << 5)
 #define OMAP24XX_CLKSEL_DSP_SHIFT			0
@@ -406,24 +406,24 @@
 /* CM_FCLKEN_MDM */
 /* 2430 only */
 #define OMAP2430_EN_OSC_SHIFT				1
-#define OMAP2430_EN_OSC					(1 << 1)
+#define OMAP2430_EN_OSC_MASK				(1 << 1)
 
 /* CM_ICLKEN_MDM */
 /* 2430 only */
 #define OMAP2430_CM_ICLKEN_MDM_EN_MDM_SHIFT		0
-#define OMAP2430_CM_ICLKEN_MDM_EN_MDM			(1 << 0)
+#define OMAP2430_CM_ICLKEN_MDM_EN_MDM_MASK		(1 << 0)
 
 /* CM_IDLEST_MDM specific bits */
 /* 2430 only */
 
 /* CM_AUTOIDLE_MDM */
 /* 2430 only */
-#define OMAP2430_AUTO_OSC				(1 << 1)
-#define OMAP2430_AUTO_MDM				(1 << 0)
+#define OMAP2430_AUTO_OSC_MASK				(1 << 1)
+#define OMAP2430_AUTO_MDM_MASK				(1 << 0)
 
 /* CM_CLKSEL_MDM */
 /* 2430 only */
-#define OMAP2430_SYNC_MDM				(1 << 4)
+#define OMAP2430_SYNC_MDM_MASK				(1 << 4)
 #define OMAP2430_CLKSEL_MDM_SHIFT			0
 #define OMAP2430_CLKSEL_MDM_MASK			(0xf << 0)
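
The renames above standardise the cm-regbits headers on paired _SHIFT/_MASK macros: the mask selects the field in the register image, the shift moves it down to bit 0. A self-contained sketch using two pairs taken from this header (the register value itself is made up):

/*
 * Illustrative sketch only: extracting fields with the _SHIFT/_MASK
 * convention.  Macro definitions match the header above; the register
 * contents are invented for the example.
 */
#include <stdio.h>

#define OMAP24XX_CLKSEL_DSP_IF_SHIFT	5
#define OMAP24XX_CLKSEL_DSP_IF_MASK	(0x3 << 5)
#define OMAP24XX_EN_SSI_SHIFT		1
#define OMAP24XX_EN_SSI_MASK		(1 << 1)

int main(void)
{
	unsigned int reg = 0x00000062;	/* made-up register contents */

	/* multi-bit field: mask first, then shift down to bit 0 */
	unsigned int dsp_if = (reg & OMAP24XX_CLKSEL_DSP_IF_MASK)
				>> OMAP24XX_CLKSEL_DSP_IF_SHIFT;

	/* single-bit flag: the mask alone answers "is it set?" */
	int ssi_enabled = !!(reg & OMAP24XX_EN_SSI_MASK);

	printf("CLKSEL_DSP_IF = %u, EN_SSI = %d\n", dsp_if, ssi_enabled);
	return 0;
}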
 
diff --git a/arch/arm/mach-omap2/cm-regbits-34xx.h b/arch/arm/mach-omap2/cm-regbits-34xx.h
index a3a3ca0..fe82b79 100644
--- a/arch/arm/mach-omap2/cm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-34xx.h
@@ -21,15 +21,15 @@
 /* CM_FCLKEN1_CORE and CM_ICLKEN1_CORE shared bits */
 #define OMAP3430ES2_EN_MMC3_MASK			(1 << 30)
 #define OMAP3430ES2_EN_MMC3_SHIFT			30
-#define OMAP3430_EN_MSPRO				(1 << 23)
+#define OMAP3430_EN_MSPRO_MASK				(1 << 23)
 #define OMAP3430_EN_MSPRO_SHIFT				23
-#define OMAP3430_EN_HDQ					(1 << 22)
+#define OMAP3430_EN_HDQ_MASK				(1 << 22)
 #define OMAP3430_EN_HDQ_SHIFT				22
-#define OMAP3430ES1_EN_FSHOSTUSB			(1 << 5)
+#define OMAP3430ES1_EN_FSHOSTUSB_MASK			(1 << 5)
 #define OMAP3430ES1_EN_FSHOSTUSB_SHIFT			5
-#define OMAP3430ES1_EN_D2D				(1 << 3)
+#define OMAP3430ES1_EN_D2D_MASK				(1 << 3)
 #define OMAP3430ES1_EN_D2D_SHIFT			3
-#define OMAP3430_EN_SSI					(1 << 0)
+#define OMAP3430_EN_SSI_MASK				(1 << 0)
 #define OMAP3430_EN_SSI_SHIFT				0
 
 /* CM_FCLKEN3_CORE and CM_ICLKEN3_CORE shared bits */
@@ -37,19 +37,19 @@
 #define OMAP3430ES2_EN_USBTLL_MASK			(1 << 2)
 
 /* CM_FCLKEN_WKUP and CM_ICLKEN_WKUP shared bits */
-#define OMAP3430_EN_WDT2				(1 << 5)
+#define OMAP3430_EN_WDT2_MASK				(1 << 5)
 #define OMAP3430_EN_WDT2_SHIFT				5
 
 /* CM_ICLKEN_CAM, CM_FCLKEN_CAM shared bits */
-#define OMAP3430_EN_CAM					(1 << 0)
+#define OMAP3430_EN_CAM_MASK				(1 << 0)
 #define OMAP3430_EN_CAM_SHIFT				0
 
 /* CM_FCLKEN_PER, CM_ICLKEN_PER shared bits */
-#define OMAP3430_EN_WDT3				(1 << 12)
+#define OMAP3430_EN_WDT3_MASK				(1 << 12)
 #define OMAP3430_EN_WDT3_SHIFT				12
 
 /* CM_CLKSEL2_EMU, CM_CLKSEL3_EMU shared bits */
-#define OMAP3430_OVERRIDE_ENABLE			(1 << 19)
+#define OMAP3430_OVERRIDE_ENABLE_MASK			(1 << 19)
 
 
 /* Bits specific to each register */
@@ -69,7 +69,7 @@
 #define OMAP3430_EN_IVA2_DPLL_MASK			(0x7 << 0)
 
 /* CM_IDLEST_IVA2 */
-#define OMAP3430_ST_IVA2				(1 << 0)
+#define OMAP3430_ST_IVA2_MASK				(1 << 0)
 
 /* CM_IDLEST_PLL_IVA2 */
 #define OMAP3430_ST_IVA2_CLK_SHIFT			0
@@ -114,7 +114,7 @@
 #define OMAP3430_EN_MPU_DPLL_MASK			(0x7 << 0)
 
 /* CM_IDLEST_MPU */
-#define OMAP3430_ST_MPU					(1 << 0)
+#define OMAP3430_ST_MPU_MASK				(1 << 0)
 
 /* CM_IDLEST_PLL_MPU */
 #define OMAP3430_ST_MPU_CLK_SHIFT			0
@@ -145,50 +145,50 @@
 #define OMAP3430_CLKACTIVITY_MPU_MASK			(1 << 0)
 
 /* CM_FCLKEN1_CORE specific bits */
-#define OMAP3430_EN_MODEM				(1 << 31)
+#define OMAP3430_EN_MODEM_MASK				(1 << 31)
 #define OMAP3430_EN_MODEM_SHIFT				31
 
 /* CM_ICLKEN1_CORE specific bits */
-#define OMAP3430_EN_ICR					(1 << 29)
+#define OMAP3430_EN_ICR_MASK				(1 << 29)
 #define OMAP3430_EN_ICR_SHIFT				29
-#define OMAP3430_EN_AES2				(1 << 28)
+#define OMAP3430_EN_AES2_MASK				(1 << 28)
 #define OMAP3430_EN_AES2_SHIFT				28
-#define OMAP3430_EN_SHA12				(1 << 27)
+#define OMAP3430_EN_SHA12_MASK				(1 << 27)
 #define OMAP3430_EN_SHA12_SHIFT				27
-#define OMAP3430_EN_DES2				(1 << 26)
+#define OMAP3430_EN_DES2_MASK				(1 << 26)
 #define OMAP3430_EN_DES2_SHIFT				26
-#define OMAP3430ES1_EN_FAC				(1 << 8)
+#define OMAP3430ES1_EN_FAC_MASK				(1 << 8)
 #define OMAP3430ES1_EN_FAC_SHIFT			8
-#define OMAP3430_EN_MAILBOXES				(1 << 7)
+#define OMAP3430_EN_MAILBOXES_MASK			(1 << 7)
 #define OMAP3430_EN_MAILBOXES_SHIFT			7
-#define OMAP3430_EN_OMAPCTRL				(1 << 6)
+#define OMAP3430_EN_OMAPCTRL_MASK			(1 << 6)
 #define OMAP3430_EN_OMAPCTRL_SHIFT			6
-#define OMAP3430_EN_SAD2D				(1 << 3)
+#define OMAP3430_EN_SAD2D_MASK				(1 << 3)
 #define OMAP3430_EN_SAD2D_SHIFT				3
-#define OMAP3430_EN_SDRC				(1 << 1)
+#define OMAP3430_EN_SDRC_MASK				(1 << 1)
 #define OMAP3430_EN_SDRC_SHIFT				1
 
 /* AM35XX specific CM_ICLKEN1_CORE bits */
 #define AM35XX_EN_IPSS_MASK				(1 << 4)
 #define AM35XX_EN_IPSS_SHIFT				4
-#define AM35XX_EN_UART4_MASK			(1 << 23)
+#define AM35XX_EN_UART4_MASK				(1 << 23)
 #define AM35XX_EN_UART4_SHIFT				23
 
 /* CM_ICLKEN2_CORE */
-#define OMAP3430_EN_PKA					(1 << 4)
+#define OMAP3430_EN_PKA_MASK				(1 << 4)
 #define OMAP3430_EN_PKA_SHIFT				4
-#define OMAP3430_EN_AES1				(1 << 3)
+#define OMAP3430_EN_AES1_MASK				(1 << 3)
 #define OMAP3430_EN_AES1_SHIFT				3
-#define OMAP3430_EN_RNG					(1 << 2)
+#define OMAP3430_EN_RNG_MASK				(1 << 2)
 #define OMAP3430_EN_RNG_SHIFT				2
-#define OMAP3430_EN_SHA11				(1 << 1)
+#define OMAP3430_EN_SHA11_MASK				(1 << 1)
 #define OMAP3430_EN_SHA11_SHIFT				1
-#define OMAP3430_EN_DES1				(1 << 0)
+#define OMAP3430_EN_DES1_MASK				(1 << 0)
 #define OMAP3430_EN_DES1_SHIFT				0
 
 /* CM_ICLKEN3_CORE */
 #define OMAP3430_EN_MAD2D_SHIFT				3
-#define OMAP3430_EN_MAD2D				(1 << 3)
+#define OMAP3430_EN_MAD2D_MASK				(1 << 3)
 
 /* CM_FCLKEN3_CORE specific bits */
 #define OMAP3430ES2_EN_TS_SHIFT				1
@@ -249,79 +249,79 @@
 #define OMAP3430ES2_ST_CPEFUSE_MASK			(1 << 0)
 
 /* CM_AUTOIDLE1_CORE */
-#define OMAP3430_AUTO_MODEM				(1 << 31)
+#define OMAP3430_AUTO_MODEM_MASK			(1 << 31)
 #define OMAP3430_AUTO_MODEM_SHIFT			31
-#define OMAP3430ES2_AUTO_MMC3				(1 << 30)
+#define OMAP3430ES2_AUTO_MMC3_MASK			(1 << 30)
 #define OMAP3430ES2_AUTO_MMC3_SHIFT			30
-#define OMAP3430ES2_AUTO_ICR				(1 << 29)
+#define OMAP3430ES2_AUTO_ICR_MASK			(1 << 29)
 #define OMAP3430ES2_AUTO_ICR_SHIFT			29
-#define OMAP3430_AUTO_AES2				(1 << 28)
+#define OMAP3430_AUTO_AES2_MASK				(1 << 28)
 #define OMAP3430_AUTO_AES2_SHIFT			28
-#define OMAP3430_AUTO_SHA12				(1 << 27)
+#define OMAP3430_AUTO_SHA12_MASK			(1 << 27)
 #define OMAP3430_AUTO_SHA12_SHIFT			27
-#define OMAP3430_AUTO_DES2				(1 << 26)
+#define OMAP3430_AUTO_DES2_MASK				(1 << 26)
 #define OMAP3430_AUTO_DES2_SHIFT			26
-#define OMAP3430_AUTO_MMC2				(1 << 25)
+#define OMAP3430_AUTO_MMC2_MASK				(1 << 25)
 #define OMAP3430_AUTO_MMC2_SHIFT			25
-#define OMAP3430_AUTO_MMC1				(1 << 24)
+#define OMAP3430_AUTO_MMC1_MASK				(1 << 24)
 #define OMAP3430_AUTO_MMC1_SHIFT			24
-#define OMAP3430_AUTO_MSPRO				(1 << 23)
+#define OMAP3430_AUTO_MSPRO_MASK			(1 << 23)
 #define OMAP3430_AUTO_MSPRO_SHIFT			23
-#define OMAP3430_AUTO_HDQ				(1 << 22)
+#define OMAP3430_AUTO_HDQ_MASK				(1 << 22)
 #define OMAP3430_AUTO_HDQ_SHIFT				22
-#define OMAP3430_AUTO_MCSPI4				(1 << 21)
+#define OMAP3430_AUTO_MCSPI4_MASK			(1 << 21)
 #define OMAP3430_AUTO_MCSPI4_SHIFT			21
-#define OMAP3430_AUTO_MCSPI3				(1 << 20)
+#define OMAP3430_AUTO_MCSPI3_MASK			(1 << 20)
 #define OMAP3430_AUTO_MCSPI3_SHIFT			20
-#define OMAP3430_AUTO_MCSPI2				(1 << 19)
+#define OMAP3430_AUTO_MCSPI2_MASK			(1 << 19)
 #define OMAP3430_AUTO_MCSPI2_SHIFT			19
-#define OMAP3430_AUTO_MCSPI1				(1 << 18)
+#define OMAP3430_AUTO_MCSPI1_MASK			(1 << 18)
 #define OMAP3430_AUTO_MCSPI1_SHIFT			18
-#define OMAP3430_AUTO_I2C3				(1 << 17)
+#define OMAP3430_AUTO_I2C3_MASK				(1 << 17)
 #define OMAP3430_AUTO_I2C3_SHIFT			17
-#define OMAP3430_AUTO_I2C2				(1 << 16)
+#define OMAP3430_AUTO_I2C2_MASK				(1 << 16)
 #define OMAP3430_AUTO_I2C2_SHIFT			16
-#define OMAP3430_AUTO_I2C1				(1 << 15)
+#define OMAP3430_AUTO_I2C1_MASK				(1 << 15)
 #define OMAP3430_AUTO_I2C1_SHIFT			15
-#define OMAP3430_AUTO_UART2				(1 << 14)
+#define OMAP3430_AUTO_UART2_MASK			(1 << 14)
 #define OMAP3430_AUTO_UART2_SHIFT			14
-#define OMAP3430_AUTO_UART1				(1 << 13)
+#define OMAP3430_AUTO_UART1_MASK			(1 << 13)
 #define OMAP3430_AUTO_UART1_SHIFT			13
-#define OMAP3430_AUTO_GPT11				(1 << 12)
+#define OMAP3430_AUTO_GPT11_MASK			(1 << 12)
 #define OMAP3430_AUTO_GPT11_SHIFT			12
-#define OMAP3430_AUTO_GPT10				(1 << 11)
+#define OMAP3430_AUTO_GPT10_MASK			(1 << 11)
 #define OMAP3430_AUTO_GPT10_SHIFT			11
-#define OMAP3430_AUTO_MCBSP5				(1 << 10)
+#define OMAP3430_AUTO_MCBSP5_MASK			(1 << 10)
 #define OMAP3430_AUTO_MCBSP5_SHIFT			10
-#define OMAP3430_AUTO_MCBSP1				(1 << 9)
+#define OMAP3430_AUTO_MCBSP1_MASK			(1 << 9)
 #define OMAP3430_AUTO_MCBSP1_SHIFT			9
-#define OMAP3430ES1_AUTO_FAC				(1 << 8)
+#define OMAP3430ES1_AUTO_FAC_MASK			(1 << 8)
 #define OMAP3430ES1_AUTO_FAC_SHIFT			8
-#define OMAP3430_AUTO_MAILBOXES				(1 << 7)
+#define OMAP3430_AUTO_MAILBOXES_MASK			(1 << 7)
 #define OMAP3430_AUTO_MAILBOXES_SHIFT			7
-#define OMAP3430_AUTO_OMAPCTRL				(1 << 6)
+#define OMAP3430_AUTO_OMAPCTRL_MASK			(1 << 6)
 #define OMAP3430_AUTO_OMAPCTRL_SHIFT			6
-#define OMAP3430ES1_AUTO_FSHOSTUSB			(1 << 5)
+#define OMAP3430ES1_AUTO_FSHOSTUSB_MASK			(1 << 5)
 #define OMAP3430ES1_AUTO_FSHOSTUSB_SHIFT		5
-#define OMAP3430_AUTO_HSOTGUSB				(1 << 4)
+#define OMAP3430_AUTO_HSOTGUSB_MASK			(1 << 4)
 #define OMAP3430_AUTO_HSOTGUSB_SHIFT			4
-#define OMAP3430ES1_AUTO_D2D				(1 << 3)
+#define OMAP3430ES1_AUTO_D2D_MASK			(1 << 3)
 #define OMAP3430ES1_AUTO_D2D_SHIFT			3
-#define OMAP3430_AUTO_SAD2D				(1 << 3)
+#define OMAP3430_AUTO_SAD2D_MASK			(1 << 3)
 #define OMAP3430_AUTO_SAD2D_SHIFT			3
-#define OMAP3430_AUTO_SSI				(1 << 0)
+#define OMAP3430_AUTO_SSI_MASK				(1 << 0)
 #define OMAP3430_AUTO_SSI_SHIFT				0
 
 /* CM_AUTOIDLE2_CORE */
-#define OMAP3430_AUTO_PKA				(1 << 4)
+#define OMAP3430_AUTO_PKA_MASK				(1 << 4)
 #define OMAP3430_AUTO_PKA_SHIFT				4
-#define OMAP3430_AUTO_AES1				(1 << 3)
+#define OMAP3430_AUTO_AES1_MASK				(1 << 3)
 #define OMAP3430_AUTO_AES1_SHIFT			3
-#define OMAP3430_AUTO_RNG				(1 << 2)
+#define OMAP3430_AUTO_RNG_MASK				(1 << 2)
 #define OMAP3430_AUTO_RNG_SHIFT				2
-#define OMAP3430_AUTO_SHA11				(1 << 1)
+#define OMAP3430_AUTO_SHA11_MASK			(1 << 1)
 #define OMAP3430_AUTO_SHA11_SHIFT			1
-#define OMAP3430_AUTO_DES1				(1 << 0)
+#define OMAP3430_AUTO_DES1_MASK				(1 << 0)
 #define OMAP3430_AUTO_DES1_SHIFT			0
 
 /* CM_AUTOIDLE3_CORE */
@@ -331,7 +331,7 @@
 #define OMAP3430ES2_AUTO_USBTLL_SHIFT			2
 #define OMAP3430ES2_AUTO_USBTLL_MASK			(1 << 2)
 #define OMAP3430_AUTO_MAD2D_SHIFT			3
-#define OMAP3430_AUTO_MAD2D				(1 << 3)
+#define OMAP3430_AUTO_MAD2D_MASK			(1 << 3)
 
 /* CM_CLKSEL_CORE */
 #define OMAP3430_CLKSEL_SSI_SHIFT			8
@@ -366,9 +366,9 @@
 #define OMAP3430_CLKACTIVITY_L3_MASK			(1 << 0)
 
 /* CM_FCLKEN_GFX */
-#define OMAP3430ES1_EN_3D				(1 << 2)
+#define OMAP3430ES1_EN_3D_MASK				(1 << 2)
 #define OMAP3430ES1_EN_3D_SHIFT				2
-#define OMAP3430ES1_EN_2D				(1 << 1)
+#define OMAP3430ES1_EN_2D_MASK				(1 << 1)
 #define OMAP3430ES1_EN_2D_SHIFT				1
 
 /* CM_ICLKEN_GFX specific bits */
@@ -416,9 +416,9 @@
 #define OMAP3430ES2_EN_USIMOCP_MASK			(1 << 9)
 
 /* CM_ICLKEN_WKUP specific bits */
-#define OMAP3430_EN_WDT1				(1 << 4)
+#define OMAP3430_EN_WDT1_MASK				(1 << 4)
 #define OMAP3430_EN_WDT1_SHIFT				4
-#define OMAP3430_EN_32KSYNC				(1 << 2)
+#define OMAP3430_EN_32KSYNC_MASK			(1 << 2)
 #define OMAP3430_EN_32KSYNC_SHIFT			2
 
 /* CM_IDLEST_WKUP specific bits */
@@ -432,19 +432,19 @@
 #define OMAP3430_ST_32KSYNC_MASK			(1 << 2)
 
 /* CM_AUTOIDLE_WKUP */
-#define OMAP3430ES2_AUTO_USIMOCP				(1 << 9)
+#define OMAP3430ES2_AUTO_USIMOCP_MASK			(1 << 9)
 #define OMAP3430ES2_AUTO_USIMOCP_SHIFT			9
-#define OMAP3430_AUTO_WDT2				(1 << 5)
+#define OMAP3430_AUTO_WDT2_MASK				(1 << 5)
 #define OMAP3430_AUTO_WDT2_SHIFT			5
-#define OMAP3430_AUTO_WDT1				(1 << 4)
+#define OMAP3430_AUTO_WDT1_MASK				(1 << 4)
 #define OMAP3430_AUTO_WDT1_SHIFT			4
-#define OMAP3430_AUTO_GPIO1				(1 << 3)
+#define OMAP3430_AUTO_GPIO1_MASK			(1 << 3)
 #define OMAP3430_AUTO_GPIO1_SHIFT			3
-#define OMAP3430_AUTO_32KSYNC				(1 << 2)
+#define OMAP3430_AUTO_32KSYNC_MASK			(1 << 2)
 #define OMAP3430_AUTO_32KSYNC_SHIFT			2
-#define OMAP3430_AUTO_GPT12				(1 << 1)
+#define OMAP3430_AUTO_GPT12_MASK			(1 << 1)
 #define OMAP3430_AUTO_GPT12_SHIFT			1
-#define OMAP3430_AUTO_GPT1				(1 << 0)
+#define OMAP3430_AUTO_GPT1_MASK				(1 << 0)
 #define OMAP3430_AUTO_GPT1_SHIFT			0
 
 /* CM_CLKSEL_WKUP */
@@ -479,7 +479,7 @@
 #define OMAP3430_EN_CORE_DPLL_MASK			(0x7 << 0)
 
 /* CM_CLKEN2_PLL */
-#define OMAP3430ES2_EN_PERIPH2_DPLL_LPMODE_SHIFT		10
+#define OMAP3430ES2_EN_PERIPH2_DPLL_LPMODE_SHIFT	10
 #define OMAP3430ES2_PERIPH2_DPLL_RAMPTIME_MASK		(0x3 << 8)
 #define OMAP3430ES2_PERIPH2_DPLL_FREQSEL_SHIFT		4
 #define OMAP3430ES2_PERIPH2_DPLL_FREQSEL_MASK		(0xf << 4)
@@ -488,10 +488,10 @@
 #define OMAP3430ES2_EN_PERIPH2_DPLL_MASK		(0x7 << 0)
 
 /* CM_IDLEST_CKGEN */
-#define OMAP3430_ST_54M_CLK				(1 << 5)
-#define OMAP3430_ST_12M_CLK				(1 << 4)
-#define OMAP3430_ST_48M_CLK				(1 << 3)
-#define OMAP3430_ST_96M_CLK				(1 << 2)
+#define OMAP3430_ST_54M_CLK_MASK			(1 << 5)
+#define OMAP3430_ST_12M_CLK_MASK			(1 << 4)
+#define OMAP3430_ST_48M_CLK_MASK			(1 << 3)
+#define OMAP3430_ST_96M_CLK_MASK			(1 << 2)
 #define OMAP3430_ST_PERIPH_CLK_SHIFT			1
 #define OMAP3430_ST_PERIPH_CLK_MASK			(1 << 1)
 #define OMAP3430_ST_CORE_CLK_SHIFT			0
@@ -558,22 +558,22 @@
 
 /* CM_CLKOUT_CTRL */
 #define OMAP3430_CLKOUT2_EN_SHIFT			7
-#define OMAP3430_CLKOUT2_EN				(1 << 7)
+#define OMAP3430_CLKOUT2_EN_MASK			(1 << 7)
 #define OMAP3430_CLKOUT2_DIV_SHIFT			3
 #define OMAP3430_CLKOUT2_DIV_MASK			(0x7 << 3)
 #define OMAP3430_CLKOUT2SOURCE_SHIFT			0
 #define OMAP3430_CLKOUT2SOURCE_MASK			(0x3 << 0)
 
 /* CM_FCLKEN_DSS */
-#define OMAP3430_EN_TV					(1 << 2)
+#define OMAP3430_EN_TV_MASK				(1 << 2)
 #define OMAP3430_EN_TV_SHIFT				2
-#define OMAP3430_EN_DSS2				(1 << 1)
+#define OMAP3430_EN_DSS2_MASK				(1 << 1)
 #define OMAP3430_EN_DSS2_SHIFT				1
-#define OMAP3430_EN_DSS1				(1 << 0)
+#define OMAP3430_EN_DSS1_MASK				(1 << 0)
 #define OMAP3430_EN_DSS1_SHIFT				0
 
 /* CM_ICLKEN_DSS */
-#define OMAP3430_CM_ICLKEN_DSS_EN_DSS			(1 << 0)
+#define OMAP3430_CM_ICLKEN_DSS_EN_DSS_MASK		(1 << 0)
 #define OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT		0
 
 /* CM_IDLEST_DSS */
@@ -585,7 +585,7 @@
 #define OMAP3430ES1_ST_DSS_MASK				(1 << 0)
 
 /* CM_AUTOIDLE_DSS */
-#define OMAP3430_AUTO_DSS				(1 << 0)
+#define OMAP3430_AUTO_DSS_MASK				(1 << 0)
 #define OMAP3430_AUTO_DSS_SHIFT				0
 
 /* CM_CLKSEL_DSS */
@@ -607,16 +607,16 @@
 #define OMAP3430_CLKACTIVITY_DSS_MASK			(1 << 0)
 
 /* CM_FCLKEN_CAM specific bits */
-#define OMAP3430_EN_CSI2				(1 << 1)
+#define OMAP3430_EN_CSI2_MASK				(1 << 1)
 #define OMAP3430_EN_CSI2_SHIFT				1
 
 /* CM_ICLKEN_CAM specific bits */
 
 /* CM_IDLEST_CAM */
-#define OMAP3430_ST_CAM					(1 << 0)
+#define OMAP3430_ST_CAM_MASK				(1 << 0)
 
 /* CM_AUTOIDLE_CAM */
-#define OMAP3430_AUTO_CAM				(1 << 0)
+#define OMAP3430_AUTO_CAM_MASK				(1 << 0)
 #define OMAP3430_AUTO_CAM_SHIFT				0
 
 /* CM_CLKSEL_CAM */
@@ -649,41 +649,41 @@
 #define OMAP3430_ST_MCBSP2_MASK				(1 << 0)
 
 /* CM_AUTOIDLE_PER */
-#define OMAP3430_AUTO_GPIO6				(1 << 17)
+#define OMAP3430_AUTO_GPIO6_MASK			(1 << 17)
 #define OMAP3430_AUTO_GPIO6_SHIFT			17
-#define OMAP3430_AUTO_GPIO5				(1 << 16)
+#define OMAP3430_AUTO_GPIO5_MASK			(1 << 16)
 #define OMAP3430_AUTO_GPIO5_SHIFT			16
-#define OMAP3430_AUTO_GPIO4				(1 << 15)
+#define OMAP3430_AUTO_GPIO4_MASK			(1 << 15)
 #define OMAP3430_AUTO_GPIO4_SHIFT			15
-#define OMAP3430_AUTO_GPIO3				(1 << 14)
+#define OMAP3430_AUTO_GPIO3_MASK			(1 << 14)
 #define OMAP3430_AUTO_GPIO3_SHIFT			14
-#define OMAP3430_AUTO_GPIO2				(1 << 13)
+#define OMAP3430_AUTO_GPIO2_MASK			(1 << 13)
 #define OMAP3430_AUTO_GPIO2_SHIFT			13
-#define OMAP3430_AUTO_WDT3				(1 << 12)
+#define OMAP3430_AUTO_WDT3_MASK				(1 << 12)
 #define OMAP3430_AUTO_WDT3_SHIFT			12
-#define OMAP3430_AUTO_UART3				(1 << 11)
+#define OMAP3430_AUTO_UART3_MASK			(1 << 11)
 #define OMAP3430_AUTO_UART3_SHIFT			11
-#define OMAP3430_AUTO_GPT9				(1 << 10)
+#define OMAP3430_AUTO_GPT9_MASK				(1 << 10)
 #define OMAP3430_AUTO_GPT9_SHIFT			10
-#define OMAP3430_AUTO_GPT8				(1 << 9)
+#define OMAP3430_AUTO_GPT8_MASK				(1 << 9)
 #define OMAP3430_AUTO_GPT8_SHIFT			9
-#define OMAP3430_AUTO_GPT7				(1 << 8)
+#define OMAP3430_AUTO_GPT7_MASK				(1 << 8)
 #define OMAP3430_AUTO_GPT7_SHIFT			8
-#define OMAP3430_AUTO_GPT6				(1 << 7)
+#define OMAP3430_AUTO_GPT6_MASK				(1 << 7)
 #define OMAP3430_AUTO_GPT6_SHIFT			7
-#define OMAP3430_AUTO_GPT5				(1 << 6)
+#define OMAP3430_AUTO_GPT5_MASK				(1 << 6)
 #define OMAP3430_AUTO_GPT5_SHIFT			6
-#define OMAP3430_AUTO_GPT4				(1 << 5)
+#define OMAP3430_AUTO_GPT4_MASK				(1 << 5)
 #define OMAP3430_AUTO_GPT4_SHIFT			5
-#define OMAP3430_AUTO_GPT3				(1 << 4)
+#define OMAP3430_AUTO_GPT3_MASK				(1 << 4)
 #define OMAP3430_AUTO_GPT3_SHIFT			4
-#define OMAP3430_AUTO_GPT2				(1 << 3)
+#define OMAP3430_AUTO_GPT2_MASK				(1 << 3)
 #define OMAP3430_AUTO_GPT2_SHIFT			3
-#define OMAP3430_AUTO_MCBSP4				(1 << 2)
+#define OMAP3430_AUTO_MCBSP4_MASK			(1 << 2)
 #define OMAP3430_AUTO_MCBSP4_SHIFT			2
-#define OMAP3430_AUTO_MCBSP3				(1 << 1)
+#define OMAP3430_AUTO_MCBSP3_MASK			(1 << 1)
 #define OMAP3430_AUTO_MCBSP3_SHIFT			1
-#define OMAP3430_AUTO_MCBSP2				(1 << 0)
+#define OMAP3430_AUTO_MCBSP2_MASK			(1 << 0)
 #define OMAP3430_AUTO_MCBSP2_SHIFT			0
 
 /* CM_CLKSEL_PER */
@@ -705,7 +705,7 @@
 #define OMAP3430_CLKSEL_GPT2_SHIFT			0
 
 /* CM_SLEEPDEP_PER specific bits */
-#define OMAP3430_CM_SLEEPDEP_PER_EN_IVA2		(1 << 2)
+#define OMAP3430_CM_SLEEPDEP_PER_EN_IVA2_MASK		(1 << 2)
 
 /* CM_CLKSTCTRL_PER */
 #define OMAP3430_CLKTRCTRL_PER_SHIFT			0
@@ -755,10 +755,10 @@
 #define OMAP3430_PERIPH_DPLL_EMU_DIV_MASK		(0x7f << 0)
 
 /* CM_POLCTRL */
-#define OMAP3430_CLKOUT2_POL				(1 << 0)
+#define OMAP3430_CLKOUT2_POL_MASK			(1 << 0)
 
 /* CM_IDLEST_NEON */
-#define OMAP3430_ST_NEON				(1 << 0)
+#define OMAP3430_ST_NEON_MASK				(1 << 0)
 
 /* CM_CLKSTCTRL_NEON */
 #define OMAP3430_CLKTRCTRL_NEON_SHIFT			0
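
The hunks above mechanically append a _MASK suffix to every single-bit define so a bit mask is distinguishable at a glance from its matching _SHIFT bit number. A minimal sketch of how such a mask is consumed — assuming the usual cm_read_mod_reg() accessor from cm.h and the PLL_MOD / CM_IDLEST identifiers from the prcm headers; the helper name is hypothetical:

#include <linux/types.h>
#include "cm.h"			/* cm_read_mod_reg(), CM_IDLEST (assumed) */
#include "cm-regbits-34xx.h"	/* OMAP3430_ST_96M_CLK_MASK */
#include "prcm-common.h"	/* PLL_MOD (assumed) */

/* Hypothetical helper: report whether the 96M functional clock is active */
static bool omap3_96m_clk_is_active(void)
{
	u32 idlest = cm_read_mod_reg(PLL_MOD, CM_IDLEST);

	return (idlest & OMAP3430_ST_96M_CLK_MASK) != 0;
}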
diff --git a/arch/arm/mach-omap2/cm.c b/arch/arm/mach-omap2/cm.c
index 58e4a1c..2d83565 100644
--- a/arch/arm/mach-omap2/cm.c
+++ b/arch/arm/mach-omap2/cm.c
@@ -27,9 +27,6 @@
 #include "cm-regbits-24xx.h"
 #include "cm-regbits-34xx.h"
 
-/* MAX_MODULE_READY_TIME: max milliseconds for module to leave idle */
-#define MAX_MODULE_READY_TIME		20000
-
 static const u8 cm_idlest_offs[] = {
 	CM_IDLEST1, CM_IDLEST2, OMAP2430_CM_IDLEST3
 };
diff --git a/arch/arm/mach-omap2/cm.h b/arch/arm/mach-omap2/cm.h
index 94728b1..a02ca30 100644
--- a/arch/arm/mach-omap2/cm.h
+++ b/arch/arm/mach-omap2/cm.h
@@ -112,7 +112,7 @@
 
 extern int omap2_cm_wait_module_ready(s16 prcm_mod, u8 idlest_id,
 				      u8 idlest_shift);
-extern int omap4_cm_wait_module_ready(u32 prcm_mod, u8 prcm_dev_offs);
+extern int omap4_cm_wait_module_ready(void __iomem *clkctrl_reg);
 
 static inline u32 cm_set_mod_reg_bits(u32 bits, s16 module, s16 idx)
 {
@@ -134,13 +134,23 @@
 
 /* CM_ICLKEN_GFX */
 #define OMAP_EN_GFX_SHIFT				0
-#define OMAP_EN_GFX					(1 << 0)
+#define OMAP_EN_GFX_MASK				(1 << 0)
 
 /* CM_IDLEST_GFX */
-#define OMAP_ST_GFX					(1 << 0)
+#define OMAP_ST_GFX_MASK				(1 << 0)
+
 
 /* CM_IDLEST indicator */
 #define OMAP24XX_CM_IDLEST_VAL		0
 #define OMAP34XX_CM_IDLEST_VAL		1
 
+/*
+ * MAX_MODULE_READY_TIME: max duration in microseconds to wait for the
+ * PRCM to request that a module exit the inactive state, in the case of
+ * OMAP2 & 3.
+ * In the case of OMAP4, this is the max duration in microseconds for the
+ * module to reach the functional state from an inactive state.
+ */
+#define MAX_MODULE_READY_TIME		2000
+
 #endif
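
The redefined constant is consumed through omap_test_timeout() from plat/common.h, which — assuming the stock implementation that udelay(1)s once per iteration — makes the value effectively a budget in microseconds. A minimal sketch of that polling pattern, with a hypothetical register and ready mask:

#include <linux/errno.h>	/* -EBUSY */
#include <linux/io.h>		/* __raw_readl() */
#include <plat/common.h>	/* omap_test_timeout() */
#include "cm.h"			/* MAX_MODULE_READY_TIME */

/* Hypothetical helper: spin until the masked bits read back as zero */
static int example_wait_ready(void __iomem *reg, u32 ready_mask)
{
	int i = 0;

	omap_test_timeout(((__raw_readl(reg) & ready_mask) == 0),
			  MAX_MODULE_READY_TIME, i);

	return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
}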
diff --git a/arch/arm/mach-omap2/cm44xx.h b/arch/arm/mach-omap2/cm44xx.h
index c575b9b..336d948 100644
--- a/arch/arm/mach-omap2/cm44xx.h
+++ b/arch/arm/mach-omap2/cm44xx.h
@@ -1,8 +1,8 @@
 /*
  * OMAP44xx CM1 & CM2 instance offset macros
  *
- * Copyright (C) 2009 Texas Instruments, Inc.
- * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
  *
  * Paul Walmsley (paul@pwsan.com)
  * Rajendra Nayak (rnayak@ti.com)
@@ -25,334 +25,557 @@
 
 /* CM1 */
 
-
 /* CM1.OCP_SOCKET_CM1 register offsets */
+#define OMAP4_REVISION_CM1_OFFSET			0x0000
 #define OMAP4430_REVISION_CM1				OMAP44XX_CM1_REGADDR(OMAP4430_CM1_OCP_SOCKET_MOD, 0x0000)
+#define OMAP4_CM_CM1_PROFILING_CLKCTRL_OFFSET		0x0040
 #define OMAP4430_CM_CM1_PROFILING_CLKCTRL		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_OCP_SOCKET_MOD, 0x0040)
 
 /* CM1.CKGEN_CM1 register offsets */
+#define OMAP4_CM_CLKSEL_CORE_OFFSET			0x0000
 #define OMAP4430_CM_CLKSEL_CORE				OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0000)
+#define OMAP4_CM_CLKSEL_ABE_OFFSET			0x0008
 #define OMAP4430_CM_CLKSEL_ABE				OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0008)
+#define OMAP4_CM_DLL_CTRL_OFFSET			0x0010
 #define OMAP4430_CM_DLL_CTRL				OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0010)
+#define OMAP4_CM_CLKMODE_DPLL_CORE_OFFSET		0x0020
 #define OMAP4430_CM_CLKMODE_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0020)
+#define OMAP4_CM_IDLEST_DPLL_CORE_OFFSET		0x0024
 #define OMAP4430_CM_IDLEST_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0024)
+#define OMAP4_CM_AUTOIDLE_DPLL_CORE_OFFSET		0x0028
 #define OMAP4430_CM_AUTOIDLE_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0028)
+#define OMAP4_CM_CLKSEL_DPLL_CORE_OFFSET		0x002c
 #define OMAP4430_CM_CLKSEL_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x002c)
+#define OMAP4_CM_DIV_M2_DPLL_CORE_OFFSET		0x0030
 #define OMAP4430_CM_DIV_M2_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0030)
+#define OMAP4_CM_DIV_M3_DPLL_CORE_OFFSET		0x0034
 #define OMAP4430_CM_DIV_M3_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0034)
+#define OMAP4_CM_DIV_M4_DPLL_CORE_OFFSET		0x0038
 #define OMAP4430_CM_DIV_M4_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0038)
+#define OMAP4_CM_DIV_M5_DPLL_CORE_OFFSET		0x003c
 #define OMAP4430_CM_DIV_M5_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x003c)
+#define OMAP4_CM_DIV_M6_DPLL_CORE_OFFSET		0x0040
 #define OMAP4430_CM_DIV_M6_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0040)
+#define OMAP4_CM_DIV_M7_DPLL_CORE_OFFSET		0x0044
 #define OMAP4430_CM_DIV_M7_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0044)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_CORE_OFFSET	0x0048
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0048)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_CORE_OFFSET	0x004c
 #define OMAP4430_CM_SSC_MODFREQDIV_DPLL_CORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x004c)
+#define OMAP4_CM_EMU_OVERRIDE_DPLL_CORE_OFFSET		0x0050
 #define OMAP4430_CM_EMU_OVERRIDE_DPLL_CORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0050)
+#define OMAP4_CM_CLKMODE_DPLL_MPU_OFFSET		0x0060
 #define OMAP4430_CM_CLKMODE_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0060)
+#define OMAP4_CM_IDLEST_DPLL_MPU_OFFSET			0x0064
 #define OMAP4430_CM_IDLEST_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0064)
+#define OMAP4_CM_AUTOIDLE_DPLL_MPU_OFFSET		0x0068
 #define OMAP4430_CM_AUTOIDLE_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0068)
+#define OMAP4_CM_CLKSEL_DPLL_MPU_OFFSET			0x006c
 #define OMAP4430_CM_CLKSEL_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x006c)
+#define OMAP4_CM_DIV_M2_DPLL_MPU_OFFSET			0x0070
 #define OMAP4430_CM_DIV_M2_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0070)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_MPU_OFFSET		0x0088
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_MPU		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0088)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_MPU_OFFSET		0x008c
 #define OMAP4430_CM_SSC_MODFREQDIV_DPLL_MPU		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x008c)
+#define OMAP4_CM_BYPCLK_DPLL_MPU_OFFSET			0x009c
 #define OMAP4430_CM_BYPCLK_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x009c)
+#define OMAP4_CM_CLKMODE_DPLL_IVA_OFFSET		0x00a0
 #define OMAP4430_CM_CLKMODE_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00a0)
+#define OMAP4_CM_IDLEST_DPLL_IVA_OFFSET			0x00a4
 #define OMAP4430_CM_IDLEST_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00a4)
+#define OMAP4_CM_AUTOIDLE_DPLL_IVA_OFFSET		0x00a8
 #define OMAP4430_CM_AUTOIDLE_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00a8)
+#define OMAP4_CM_CLKSEL_DPLL_IVA_OFFSET			0x00ac
 #define OMAP4430_CM_CLKSEL_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00ac)
+#define OMAP4_CM_DIV_M4_DPLL_IVA_OFFSET			0x00b8
 #define OMAP4430_CM_DIV_M4_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00b8)
+#define OMAP4_CM_DIV_M5_DPLL_IVA_OFFSET			0x00bc
 #define OMAP4430_CM_DIV_M5_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00bc)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_IVA_OFFSET		0x00c8
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_IVA		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00c8)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_IVA_OFFSET		0x00cc
 #define OMAP4430_CM_SSC_MODFREQDIV_DPLL_IVA		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00cc)
+#define OMAP4_CM_BYPCLK_DPLL_IVA_OFFSET			0x00dc
 #define OMAP4430_CM_BYPCLK_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00dc)
+#define OMAP4_CM_CLKMODE_DPLL_ABE_OFFSET		0x00e0
 #define OMAP4430_CM_CLKMODE_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00e0)
+#define OMAP4_CM_IDLEST_DPLL_ABE_OFFSET			0x00e4
 #define OMAP4430_CM_IDLEST_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00e4)
+#define OMAP4_CM_AUTOIDLE_DPLL_ABE_OFFSET		0x00e8
 #define OMAP4430_CM_AUTOIDLE_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00e8)
+#define OMAP4_CM_CLKSEL_DPLL_ABE_OFFSET			0x00ec
 #define OMAP4430_CM_CLKSEL_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00ec)
+#define OMAP4_CM_DIV_M2_DPLL_ABE_OFFSET			0x00f0
 #define OMAP4430_CM_DIV_M2_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00f0)
+#define OMAP4_CM_DIV_M3_DPLL_ABE_OFFSET			0x00f4
 #define OMAP4430_CM_DIV_M3_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00f4)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_ABE_OFFSET		0x0108
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_ABE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0108)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_ABE_OFFSET		0x010c
 #define OMAP4430_CM_SSC_MODFREQDIV_DPLL_ABE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x010c)
+#define OMAP4_CM_CLKMODE_DPLL_DDRPHY_OFFSET		0x0120
 #define OMAP4430_CM_CLKMODE_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0120)
+#define OMAP4_CM_IDLEST_DPLL_DDRPHY_OFFSET		0x0124
 #define OMAP4430_CM_IDLEST_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0124)
+#define OMAP4_CM_AUTOIDLE_DPLL_DDRPHY_OFFSET		0x0128
 #define OMAP4430_CM_AUTOIDLE_DPLL_DDRPHY		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0128)
+#define OMAP4_CM_CLKSEL_DPLL_DDRPHY_OFFSET		0x012c
 #define OMAP4430_CM_CLKSEL_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x012c)
+#define OMAP4_CM_DIV_M2_DPLL_DDRPHY_OFFSET		0x0130
 #define OMAP4430_CM_DIV_M2_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0130)
+#define OMAP4_CM_DIV_M4_DPLL_DDRPHY_OFFSET		0x0138
 #define OMAP4430_CM_DIV_M4_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0138)
+#define OMAP4_CM_DIV_M5_DPLL_DDRPHY_OFFSET		0x013c
 #define OMAP4430_CM_DIV_M5_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x013c)
+#define OMAP4_CM_DIV_M6_DPLL_DDRPHY_OFFSET		0x0140
 #define OMAP4430_CM_DIV_M6_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0140)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_DDRPHY_OFFSET	0x0148
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_DDRPHY		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0148)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_DDRPHY_OFFSET	0x014c
 #define OMAP4430_CM_SSC_MODFREQDIV_DPLL_DDRPHY		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x014c)
+#define OMAP4_CM_SHADOW_FREQ_CONFIG1_OFFSET		0x0160
 #define OMAP4430_CM_SHADOW_FREQ_CONFIG1			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0160)
+#define OMAP4_CM_SHADOW_FREQ_CONFIG2_OFFSET		0x0164
 #define OMAP4430_CM_SHADOW_FREQ_CONFIG2			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0164)
+#define OMAP4_CM_DYN_DEP_PRESCAL_OFFSET			0x0170
 #define OMAP4430_CM_DYN_DEP_PRESCAL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0170)
+#define OMAP4_CM_RESTORE_ST_OFFSET			0x0180
 #define OMAP4430_CM_RESTORE_ST				OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0180)
 
 /* CM1.MPU_CM1 register offsets */
+#define OMAP4_CM_MPU_CLKSTCTRL_OFFSET			0x0000
 #define OMAP4430_CM_MPU_CLKSTCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0000)
+#define OMAP4_CM_MPU_STATICDEP_OFFSET			0x0004
 #define OMAP4430_CM_MPU_STATICDEP			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0004)
+#define OMAP4_CM_MPU_DYNAMICDEP_OFFSET			0x0008
 #define OMAP4430_CM_MPU_DYNAMICDEP			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0008)
+#define OMAP4_CM_MPU_MPU_CLKCTRL_OFFSET			0x0020
 #define OMAP4430_CM_MPU_MPU_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0020)
 
 /* CM1.TESLA_CM1 register offsets */
+#define OMAP4_CM_TESLA_CLKSTCTRL_OFFSET			0x0000
 #define OMAP4430_CM_TESLA_CLKSTCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0000)
+#define OMAP4_CM_TESLA_STATICDEP_OFFSET			0x0004
 #define OMAP4430_CM_TESLA_STATICDEP			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0004)
+#define OMAP4_CM_TESLA_DYNAMICDEP_OFFSET		0x0008
 #define OMAP4430_CM_TESLA_DYNAMICDEP			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0008)
+#define OMAP4_CM_TESLA_TESLA_CLKCTRL_OFFSET		0x0020
 #define OMAP4430_CM_TESLA_TESLA_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0020)
 
 /* CM1.ABE_CM1 register offsets */
+#define OMAP4_CM1_ABE_CLKSTCTRL_OFFSET			0x0000
 #define OMAP4430_CM1_ABE_CLKSTCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0000)
+#define OMAP4_CM1_ABE_L4ABE_CLKCTRL_OFFSET		0x0020
 #define OMAP4430_CM1_ABE_L4ABE_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0020)
+#define OMAP4_CM1_ABE_AESS_CLKCTRL_OFFSET		0x0028
 #define OMAP4430_CM1_ABE_AESS_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0028)
+#define OMAP4_CM1_ABE_PDM_CLKCTRL_OFFSET		0x0030
 #define OMAP4430_CM1_ABE_PDM_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0030)
+#define OMAP4_CM1_ABE_DMIC_CLKCTRL_OFFSET		0x0038
 #define OMAP4430_CM1_ABE_DMIC_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0038)
+#define OMAP4_CM1_ABE_MCASP_CLKCTRL_OFFSET		0x0040
 #define OMAP4430_CM1_ABE_MCASP_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0040)
+#define OMAP4_CM1_ABE_MCBSP1_CLKCTRL_OFFSET		0x0048
 #define OMAP4430_CM1_ABE_MCBSP1_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0048)
+#define OMAP4_CM1_ABE_MCBSP2_CLKCTRL_OFFSET		0x0050
 #define OMAP4430_CM1_ABE_MCBSP2_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0050)
+#define OMAP4_CM1_ABE_MCBSP3_CLKCTRL_OFFSET		0x0058
 #define OMAP4430_CM1_ABE_MCBSP3_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0058)
+#define OMAP4_CM1_ABE_SLIMBUS_CLKCTRL_OFFSET		0x0060
 #define OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0060)
+#define OMAP4_CM1_ABE_TIMER5_CLKCTRL_OFFSET		0x0068
 #define OMAP4430_CM1_ABE_TIMER5_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0068)
+#define OMAP4_CM1_ABE_TIMER6_CLKCTRL_OFFSET		0x0070
 #define OMAP4430_CM1_ABE_TIMER6_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0070)
+#define OMAP4_CM1_ABE_TIMER7_CLKCTRL_OFFSET		0x0078
 #define OMAP4430_CM1_ABE_TIMER7_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0078)
+#define OMAP4_CM1_ABE_TIMER8_CLKCTRL_OFFSET		0x0080
 #define OMAP4430_CM1_ABE_TIMER8_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0080)
+#define OMAP4_CM1_ABE_WDT3_CLKCTRL_OFFSET		0x0088
 #define OMAP4430_CM1_ABE_WDT3_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0088)
 
-/* CM1.RESTORE_CM1 register offsets */
-#define OMAP4430_CM_CLKSEL_CORE_RESTORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0000)
-#define OMAP4430_CM_DIV_M2_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0004)
-#define OMAP4430_CM_DIV_M3_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0008)
-#define OMAP4430_CM_DIV_M4_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x000c)
-#define OMAP4430_CM_DIV_M5_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0010)
-#define OMAP4430_CM_DIV_M6_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0014)
-#define OMAP4430_CM_DIV_M7_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0018)
-#define OMAP4430_CM_CLKSEL_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x001c)
-#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE	OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0020)
-#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE	OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0024)
-#define OMAP4430_CM_CLKMODE_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0028)
-#define OMAP4430_CM_SHADOW_FREQ_CONFIG1_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x002c)
-#define OMAP4430_CM_AUTOIDLE_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0030)
-#define OMAP4430_CM_MPU_CLKSTCTRL_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0034)
-
 /* CM2 */
 
-
 /* CM2.OCP_SOCKET_CM2 register offsets */
+#define OMAP4_REVISION_CM2_OFFSET			0x0000
 #define OMAP4430_REVISION_CM2				OMAP44XX_CM2_REGADDR(OMAP4430_CM2_OCP_SOCKET_MOD, 0x0000)
+#define OMAP4_CM_CM2_PROFILING_CLKCTRL_OFFSET		0x0040
 #define OMAP4430_CM_CM2_PROFILING_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_OCP_SOCKET_MOD, 0x0040)
 
 /* CM2.CKGEN_CM2 register offsets */
+#define OMAP4_CM_CLKSEL_DUCATI_ISS_ROOT_OFFSET		0x0000
 #define OMAP4430_CM_CLKSEL_DUCATI_ISS_ROOT		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0000)
+#define OMAP4_CM_CLKSEL_USB_60MHZ_OFFSET		0x0004
 #define OMAP4430_CM_CLKSEL_USB_60MHZ			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0004)
+#define OMAP4_CM_SCALE_FCLK_OFFSET			0x0008
 #define OMAP4430_CM_SCALE_FCLK				OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0008)
+#define OMAP4_CM_CORE_DVFS_PERF1_OFFSET			0x0010
 #define OMAP4430_CM_CORE_DVFS_PERF1			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0010)
+#define OMAP4_CM_CORE_DVFS_PERF2_OFFSET			0x0014
 #define OMAP4430_CM_CORE_DVFS_PERF2			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0014)
+#define OMAP4_CM_CORE_DVFS_PERF3_OFFSET			0x0018
 #define OMAP4430_CM_CORE_DVFS_PERF3			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0018)
+#define OMAP4_CM_CORE_DVFS_PERF4_OFFSET			0x001c
 #define OMAP4430_CM_CORE_DVFS_PERF4			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x001c)
+#define OMAP4_CM_CORE_DVFS_CURRENT_OFFSET		0x0024
 #define OMAP4430_CM_CORE_DVFS_CURRENT			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0024)
+#define OMAP4_CM_IVA_DVFS_PERF_TESLA_OFFSET		0x0028
 #define OMAP4430_CM_IVA_DVFS_PERF_TESLA			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0028)
+#define OMAP4_CM_IVA_DVFS_PERF_IVAHD_OFFSET		0x002c
 #define OMAP4430_CM_IVA_DVFS_PERF_IVAHD			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x002c)
+#define OMAP4_CM_IVA_DVFS_PERF_ABE_OFFSET		0x0030
 #define OMAP4430_CM_IVA_DVFS_PERF_ABE			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0030)
+#define OMAP4_CM_IVA_DVFS_CURRENT_OFFSET		0x0038
 #define OMAP4430_CM_IVA_DVFS_CURRENT			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0038)
+#define OMAP4_CM_CLKMODE_DPLL_PER_OFFSET		0x0040
 #define OMAP4430_CM_CLKMODE_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0040)
+#define OMAP4_CM_IDLEST_DPLL_PER_OFFSET			0x0044
 #define OMAP4430_CM_IDLEST_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0044)
+#define OMAP4_CM_AUTOIDLE_DPLL_PER_OFFSET		0x0048
 #define OMAP4430_CM_AUTOIDLE_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0048)
+#define OMAP4_CM_CLKSEL_DPLL_PER_OFFSET			0x004c
 #define OMAP4430_CM_CLKSEL_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x004c)
+#define OMAP4_CM_DIV_M2_DPLL_PER_OFFSET			0x0050
 #define OMAP4430_CM_DIV_M2_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0050)
+#define OMAP4_CM_DIV_M3_DPLL_PER_OFFSET			0x0054
 #define OMAP4430_CM_DIV_M3_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0054)
+#define OMAP4_CM_DIV_M4_DPLL_PER_OFFSET			0x0058
 #define OMAP4430_CM_DIV_M4_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0058)
+#define OMAP4_CM_DIV_M5_DPLL_PER_OFFSET			0x005c
 #define OMAP4430_CM_DIV_M5_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x005c)
+#define OMAP4_CM_DIV_M6_DPLL_PER_OFFSET			0x0060
 #define OMAP4430_CM_DIV_M6_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0060)
+#define OMAP4_CM_DIV_M7_DPLL_PER_OFFSET			0x0064
 #define OMAP4430_CM_DIV_M7_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0064)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_PER_OFFSET		0x0068
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_PER		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0068)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_PER_OFFSET		0x006c
 #define OMAP4430_CM_SSC_MODFREQDIV_DPLL_PER		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x006c)
+#define OMAP4_CM_EMU_OVERRIDE_DPLL_PER_OFFSET		0x0070
 #define OMAP4430_CM_EMU_OVERRIDE_DPLL_PER		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0070)
+#define OMAP4_CM_CLKMODE_DPLL_USB_OFFSET		0x0080
 #define OMAP4430_CM_CLKMODE_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0080)
+#define OMAP4_CM_IDLEST_DPLL_USB_OFFSET			0x0084
 #define OMAP4430_CM_IDLEST_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0084)
+#define OMAP4_CM_AUTOIDLE_DPLL_USB_OFFSET		0x0088
 #define OMAP4430_CM_AUTOIDLE_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0088)
+#define OMAP4_CM_CLKSEL_DPLL_USB_OFFSET			0x008c
 #define OMAP4430_CM_CLKSEL_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x008c)
+#define OMAP4_CM_DIV_M2_DPLL_USB_OFFSET			0x0090
 #define OMAP4430_CM_DIV_M2_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0090)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_USB_OFFSET		0x00a8
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_USB		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00a8)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_USB_OFFSET		0x00ac
 #define OMAP4430_CM_SSC_MODFREQDIV_DPLL_USB		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00ac)
+#define OMAP4_CM_CLKDCOLDO_DPLL_USB_OFFSET		0x00b4
 #define OMAP4430_CM_CLKDCOLDO_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00b4)
+#define OMAP4_CM_CLKMODE_DPLL_UNIPRO_OFFSET		0x00c0
 #define OMAP4430_CM_CLKMODE_DPLL_UNIPRO			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00c0)
+#define OMAP4_CM_IDLEST_DPLL_UNIPRO_OFFSET		0x00c4
 #define OMAP4430_CM_IDLEST_DPLL_UNIPRO			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00c4)
+#define OMAP4_CM_AUTOIDLE_DPLL_UNIPRO_OFFSET		0x00c8
 #define OMAP4430_CM_AUTOIDLE_DPLL_UNIPRO		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00c8)
+#define OMAP4_CM_CLKSEL_DPLL_UNIPRO_OFFSET		0x00cc
 #define OMAP4430_CM_CLKSEL_DPLL_UNIPRO			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00cc)
+#define OMAP4_CM_DIV_M2_DPLL_UNIPRO_OFFSET		0x00d0
 #define OMAP4430_CM_DIV_M2_DPLL_UNIPRO			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00d0)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_UNIPRO_OFFSET	0x00e8
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_UNIPRO		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00e8)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_UNIPRO_OFFSET	0x00ec
 #define OMAP4430_CM_SSC_MODFREQDIV_DPLL_UNIPRO		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00ec)
 
 /* CM2.ALWAYS_ON_CM2 register offsets */
+#define OMAP4_CM_ALWON_CLKSTCTRL_OFFSET			0x0000
 #define OMAP4430_CM_ALWON_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0000)
+#define OMAP4_CM_ALWON_MDMINTC_CLKCTRL_OFFSET		0x0020
 #define OMAP4430_CM_ALWON_MDMINTC_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0020)
+#define OMAP4_CM_ALWON_SR_MPU_CLKCTRL_OFFSET		0x0028
 #define OMAP4430_CM_ALWON_SR_MPU_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0028)
+#define OMAP4_CM_ALWON_SR_IVA_CLKCTRL_OFFSET		0x0030
 #define OMAP4430_CM_ALWON_SR_IVA_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0030)
+#define OMAP4_CM_ALWON_SR_CORE_CLKCTRL_OFFSET		0x0038
 #define OMAP4430_CM_ALWON_SR_CORE_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0038)
 
 /* CM2.CORE_CM2 register offsets */
+#define OMAP4_CM_L3_1_CLKSTCTRL_OFFSET			0x0000
 #define OMAP4430_CM_L3_1_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0000)
+#define OMAP4_CM_L3_1_DYNAMICDEP_OFFSET			0x0008
 #define OMAP4430_CM_L3_1_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0008)
+#define OMAP4_CM_L3_1_L3_1_CLKCTRL_OFFSET		0x0020
 #define OMAP4430_CM_L3_1_L3_1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0020)
+#define OMAP4_CM_L3_2_CLKSTCTRL_OFFSET			0x0100
 #define OMAP4430_CM_L3_2_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0100)
+#define OMAP4_CM_L3_2_DYNAMICDEP_OFFSET			0x0108
 #define OMAP4430_CM_L3_2_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0108)
+#define OMAP4_CM_L3_2_L3_2_CLKCTRL_OFFSET		0x0120
 #define OMAP4430_CM_L3_2_L3_2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0120)
+#define OMAP4_CM_L3_2_GPMC_CLKCTRL_OFFSET		0x0128
 #define OMAP4430_CM_L3_2_GPMC_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0128)
+#define OMAP4_CM_L3_2_OCMC_RAM_CLKCTRL_OFFSET		0x0130
 #define OMAP4430_CM_L3_2_OCMC_RAM_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0130)
+#define OMAP4_CM_DUCATI_CLKSTCTRL_OFFSET		0x0200
 #define OMAP4430_CM_DUCATI_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0200)
+#define OMAP4_CM_DUCATI_STATICDEP_OFFSET		0x0204
 #define OMAP4430_CM_DUCATI_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0204)
+#define OMAP4_CM_DUCATI_DYNAMICDEP_OFFSET		0x0208
 #define OMAP4430_CM_DUCATI_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0208)
+#define OMAP4_CM_DUCATI_DUCATI_CLKCTRL_OFFSET		0x0220
 #define OMAP4430_CM_DUCATI_DUCATI_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0220)
+#define OMAP4_CM_SDMA_CLKSTCTRL_OFFSET			0x0300
 #define OMAP4430_CM_SDMA_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0300)
+#define OMAP4_CM_SDMA_STATICDEP_OFFSET			0x0304
 #define OMAP4430_CM_SDMA_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0304)
+#define OMAP4_CM_SDMA_DYNAMICDEP_OFFSET			0x0308
 #define OMAP4430_CM_SDMA_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0308)
+#define OMAP4_CM_SDMA_SDMA_CLKCTRL_OFFSET		0x0320
 #define OMAP4430_CM_SDMA_SDMA_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0320)
+#define OMAP4_CM_MEMIF_CLKSTCTRL_OFFSET			0x0400
 #define OMAP4430_CM_MEMIF_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0400)
+#define OMAP4_CM_MEMIF_DMM_CLKCTRL_OFFSET		0x0420
 #define OMAP4430_CM_MEMIF_DMM_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0420)
+#define OMAP4_CM_MEMIF_EMIF_FW_CLKCTRL_OFFSET		0x0428
 #define OMAP4430_CM_MEMIF_EMIF_FW_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0428)
+#define OMAP4_CM_MEMIF_EMIF_1_CLKCTRL_OFFSET		0x0430
 #define OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0430)
+#define OMAP4_CM_MEMIF_EMIF_2_CLKCTRL_OFFSET		0x0438
 #define OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0438)
+#define OMAP4_CM_MEMIF_DLL_CLKCTRL_OFFSET		0x0440
 #define OMAP4430_CM_MEMIF_DLL_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0440)
+#define OMAP4_CM_MEMIF_EMIF_H1_CLKCTRL_OFFSET		0x0450
 #define OMAP4430_CM_MEMIF_EMIF_H1_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0450)
+#define OMAP4_CM_MEMIF_EMIF_H2_CLKCTRL_OFFSET		0x0458
 #define OMAP4430_CM_MEMIF_EMIF_H2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0458)
+#define OMAP4_CM_MEMIF_DLL_H_CLKCTRL_OFFSET		0x0460
 #define OMAP4430_CM_MEMIF_DLL_H_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0460)
+#define OMAP4_CM_D2D_CLKSTCTRL_OFFSET			0x0500
 #define OMAP4430_CM_D2D_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0500)
+#define OMAP4_CM_D2D_STATICDEP_OFFSET			0x0504
 #define OMAP4430_CM_D2D_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0504)
+#define OMAP4_CM_D2D_DYNAMICDEP_OFFSET			0x0508
 #define OMAP4430_CM_D2D_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0508)
+#define OMAP4_CM_D2D_SAD2D_CLKCTRL_OFFSET		0x0520
 #define OMAP4430_CM_D2D_SAD2D_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0520)
+#define OMAP4_CM_D2D_MODEM_ICR_CLKCTRL_OFFSET		0x0528
 #define OMAP4430_CM_D2D_MODEM_ICR_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0528)
+#define OMAP4_CM_D2D_SAD2D_FW_CLKCTRL_OFFSET		0x0530
 #define OMAP4430_CM_D2D_SAD2D_FW_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0530)
+#define OMAP4_CM_L4CFG_CLKSTCTRL_OFFSET			0x0600
 #define OMAP4430_CM_L4CFG_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0600)
+#define OMAP4_CM_L4CFG_DYNAMICDEP_OFFSET		0x0608
 #define OMAP4430_CM_L4CFG_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0608)
+#define OMAP4_CM_L4CFG_L4_CFG_CLKCTRL_OFFSET		0x0620
 #define OMAP4430_CM_L4CFG_L4_CFG_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0620)
+#define OMAP4_CM_L4CFG_HW_SEM_CLKCTRL_OFFSET		0x0628
 #define OMAP4430_CM_L4CFG_HW_SEM_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0628)
+#define OMAP4_CM_L4CFG_MAILBOX_CLKCTRL_OFFSET		0x0630
 #define OMAP4430_CM_L4CFG_MAILBOX_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0630)
+#define OMAP4_CM_L4CFG_SAR_ROM_CLKCTRL_OFFSET		0x0638
 #define OMAP4430_CM_L4CFG_SAR_ROM_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0638)
+#define OMAP4_CM_L3INSTR_CLKSTCTRL_OFFSET		0x0700
 #define OMAP4430_CM_L3INSTR_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0700)
+#define OMAP4_CM_L3INSTR_L3_3_CLKCTRL_OFFSET		0x0720
 #define OMAP4430_CM_L3INSTR_L3_3_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0720)
+#define OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_OFFSET	0x0728
 #define OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0728)
+#define OMAP4_CM_L3INSTR_OCP_WP1_CLKCTRL_OFFSET		0x0740
 #define OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0740)
 
 /* CM2.IVAHD_CM2 register offsets */
+#define OMAP4_CM_IVAHD_CLKSTCTRL_OFFSET			0x0000
 #define OMAP4430_CM_IVAHD_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0000)
+#define OMAP4_CM_IVAHD_STATICDEP_OFFSET			0x0004
 #define OMAP4430_CM_IVAHD_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0004)
+#define OMAP4_CM_IVAHD_DYNAMICDEP_OFFSET		0x0008
 #define OMAP4430_CM_IVAHD_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0008)
+#define OMAP4_CM_IVAHD_IVAHD_CLKCTRL_OFFSET		0x0020
 #define OMAP4430_CM_IVAHD_IVAHD_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0020)
+#define OMAP4_CM_IVAHD_SL2_CLKCTRL_OFFSET		0x0028
 #define OMAP4430_CM_IVAHD_SL2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0028)
 
 /* CM2.CAM_CM2 register offsets */
+#define OMAP4_CM_CAM_CLKSTCTRL_OFFSET			0x0000
 #define OMAP4430_CM_CAM_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0000)
+#define OMAP4_CM_CAM_STATICDEP_OFFSET			0x0004
 #define OMAP4430_CM_CAM_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0004)
+#define OMAP4_CM_CAM_DYNAMICDEP_OFFSET			0x0008
 #define OMAP4430_CM_CAM_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0008)
+#define OMAP4_CM_CAM_ISS_CLKCTRL_OFFSET			0x0020
 #define OMAP4430_CM_CAM_ISS_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0020)
+#define OMAP4_CM_CAM_FDIF_CLKCTRL_OFFSET		0x0028
 #define OMAP4430_CM_CAM_FDIF_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0028)
 
 /* CM2.DSS_CM2 register offsets */
+#define OMAP4_CM_DSS_CLKSTCTRL_OFFSET			0x0000
 #define OMAP4430_CM_DSS_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0000)
+#define OMAP4_CM_DSS_STATICDEP_OFFSET			0x0004
 #define OMAP4430_CM_DSS_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0004)
+#define OMAP4_CM_DSS_DYNAMICDEP_OFFSET			0x0008
 #define OMAP4430_CM_DSS_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0008)
+#define OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET			0x0020
 #define OMAP4430_CM_DSS_DSS_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0020)
+#define OMAP4_CM_DSS_DEISS_CLKCTRL_OFFSET		0x0028
 #define OMAP4430_CM_DSS_DEISS_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0028)
 
 /* CM2.GFX_CM2 register offsets */
+#define OMAP4_CM_GFX_CLKSTCTRL_OFFSET			0x0000
 #define OMAP4430_CM_GFX_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0000)
+#define OMAP4_CM_GFX_STATICDEP_OFFSET			0x0004
 #define OMAP4430_CM_GFX_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0004)
+#define OMAP4_CM_GFX_DYNAMICDEP_OFFSET			0x0008
 #define OMAP4430_CM_GFX_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0008)
+#define OMAP4_CM_GFX_GFX_CLKCTRL_OFFSET			0x0020
 #define OMAP4430_CM_GFX_GFX_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0020)
 
 /* CM2.L3INIT_CM2 register offsets */
+#define OMAP4_CM_L3INIT_CLKSTCTRL_OFFSET		0x0000
 #define OMAP4430_CM_L3INIT_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0000)
+#define OMAP4_CM_L3INIT_STATICDEP_OFFSET		0x0004
 #define OMAP4430_CM_L3INIT_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0004)
+#define OMAP4_CM_L3INIT_DYNAMICDEP_OFFSET		0x0008
 #define OMAP4430_CM_L3INIT_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0008)
+#define OMAP4_CM_L3INIT_MMC1_CLKCTRL_OFFSET		0x0028
 #define OMAP4430_CM_L3INIT_MMC1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0028)
+#define OMAP4_CM_L3INIT_MMC2_CLKCTRL_OFFSET		0x0030
 #define OMAP4430_CM_L3INIT_MMC2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0030)
+#define OMAP4_CM_L3INIT_HSI_CLKCTRL_OFFSET		0x0038
 #define OMAP4430_CM_L3INIT_HSI_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0038)
+#define OMAP4_CM_L3INIT_UNIPRO1_CLKCTRL_OFFSET		0x0040
 #define OMAP4430_CM_L3INIT_UNIPRO1_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0040)
+#define OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_OFFSET		0x0058
 #define OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0058)
+#define OMAP4_CM_L3INIT_USB_OTG_CLKCTRL_OFFSET		0x0060
 #define OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0060)
+#define OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_OFFSET		0x0068
 #define OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0068)
+#define OMAP4_CM_L3INIT_P1500_CLKCTRL_OFFSET		0x0078
 #define OMAP4430_CM_L3INIT_P1500_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0078)
+#define OMAP4_CM_L3INIT_EMAC_CLKCTRL_OFFSET		0x0080
 #define OMAP4430_CM_L3INIT_EMAC_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0080)
+#define OMAP4_CM_L3INIT_SATA_CLKCTRL_OFFSET		0x0088
 #define OMAP4430_CM_L3INIT_SATA_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0088)
+#define OMAP4_CM_L3INIT_TPPSS_CLKCTRL_OFFSET		0x0090
 #define OMAP4430_CM_L3INIT_TPPSS_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0090)
+#define OMAP4_CM_L3INIT_PCIESS_CLKCTRL_OFFSET		0x0098
 #define OMAP4430_CM_L3INIT_PCIESS_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0098)
+#define OMAP4_CM_L3INIT_CCPTX_CLKCTRL_OFFSET		0x00a8
 #define OMAP4430_CM_L3INIT_CCPTX_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00a8)
+#define OMAP4_CM_L3INIT_XHPI_CLKCTRL_OFFSET		0x00c0
 #define OMAP4430_CM_L3INIT_XHPI_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00c0)
+#define OMAP4_CM_L3INIT_MMC6_CLKCTRL_OFFSET		0x00c8
 #define OMAP4430_CM_L3INIT_MMC6_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00c8)
+#define OMAP4_CM_L3INIT_USB_HOST_FS_CLKCTRL_OFFSET	0x00d0
 #define OMAP4430_CM_L3INIT_USB_HOST_FS_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00d0)
+#define OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET	0x00e0
 #define OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00e0)
 
 /* CM2.L4PER_CM2 register offsets */
+#define OMAP4_CM_L4PER_CLKSTCTRL_OFFSET			0x0000
 #define OMAP4430_CM_L4PER_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0000)
+#define OMAP4_CM_L4PER_DYNAMICDEP_OFFSET		0x0008
 #define OMAP4430_CM_L4PER_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0008)
+#define OMAP4_CM_L4PER_ADC_CLKCTRL_OFFSET		0x0020
 #define OMAP4430_CM_L4PER_ADC_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0020)
+#define OMAP4_CM_L4PER_DMTIMER10_CLKCTRL_OFFSET		0x0028
 #define OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0028)
+#define OMAP4_CM_L4PER_DMTIMER11_CLKCTRL_OFFSET		0x0030
 #define OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0030)
+#define OMAP4_CM_L4PER_DMTIMER2_CLKCTRL_OFFSET		0x0038
 #define OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0038)
+#define OMAP4_CM_L4PER_DMTIMER3_CLKCTRL_OFFSET		0x0040
 #define OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0040)
+#define OMAP4_CM_L4PER_DMTIMER4_CLKCTRL_OFFSET		0x0048
 #define OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0048)
+#define OMAP4_CM_L4PER_DMTIMER9_CLKCTRL_OFFSET		0x0050
 #define OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0050)
+#define OMAP4_CM_L4PER_ELM_CLKCTRL_OFFSET		0x0058
 #define OMAP4430_CM_L4PER_ELM_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0058)
+#define OMAP4_CM_L4PER_GPIO2_CLKCTRL_OFFSET		0x0060
 #define OMAP4430_CM_L4PER_GPIO2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0060)
+#define OMAP4_CM_L4PER_GPIO3_CLKCTRL_OFFSET		0x0068
 #define OMAP4430_CM_L4PER_GPIO3_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0068)
+#define OMAP4_CM_L4PER_GPIO4_CLKCTRL_OFFSET		0x0070
 #define OMAP4430_CM_L4PER_GPIO4_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0070)
+#define OMAP4_CM_L4PER_GPIO5_CLKCTRL_OFFSET		0x0078
 #define OMAP4430_CM_L4PER_GPIO5_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0078)
+#define OMAP4_CM_L4PER_GPIO6_CLKCTRL_OFFSET		0x0080
 #define OMAP4430_CM_L4PER_GPIO6_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0080)
+#define OMAP4_CM_L4PER_HDQ1W_CLKCTRL_OFFSET		0x0088
 #define OMAP4430_CM_L4PER_HDQ1W_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0088)
+#define OMAP4_CM_L4PER_HECC1_CLKCTRL_OFFSET		0x0090
 #define OMAP4430_CM_L4PER_HECC1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0090)
+#define OMAP4_CM_L4PER_HECC2_CLKCTRL_OFFSET		0x0098
 #define OMAP4430_CM_L4PER_HECC2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0098)
+#define OMAP4_CM_L4PER_I2C1_CLKCTRL_OFFSET		0x00a0
 #define OMAP4430_CM_L4PER_I2C1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00a0)
+#define OMAP4_CM_L4PER_I2C2_CLKCTRL_OFFSET		0x00a8
 #define OMAP4430_CM_L4PER_I2C2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00a8)
+#define OMAP4_CM_L4PER_I2C3_CLKCTRL_OFFSET		0x00b0
 #define OMAP4430_CM_L4PER_I2C3_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00b0)
+#define OMAP4_CM_L4PER_I2C4_CLKCTRL_OFFSET		0x00b8
 #define OMAP4430_CM_L4PER_I2C4_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00b8)
+#define OMAP4_CM_L4PER_L4PER_CLKCTRL_OFFSET		0x00c0
 #define OMAP4430_CM_L4PER_L4PER_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00c0)
+#define OMAP4_CM_L4PER_MCASP2_CLKCTRL_OFFSET		0x00d0
 #define OMAP4430_CM_L4PER_MCASP2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00d0)
+#define OMAP4_CM_L4PER_MCASP3_CLKCTRL_OFFSET		0x00d8
 #define OMAP4430_CM_L4PER_MCASP3_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00d8)
+#define OMAP4_CM_L4PER_MCBSP4_CLKCTRL_OFFSET		0x00e0
 #define OMAP4430_CM_L4PER_MCBSP4_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00e0)
+#define OMAP4_CM_L4PER_MGATE_CLKCTRL_OFFSET		0x00e8
 #define OMAP4430_CM_L4PER_MGATE_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00e8)
+#define OMAP4_CM_L4PER_MCSPI1_CLKCTRL_OFFSET		0x00f0
 #define OMAP4430_CM_L4PER_MCSPI1_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00f0)
+#define OMAP4_CM_L4PER_MCSPI2_CLKCTRL_OFFSET		0x00f8
 #define OMAP4430_CM_L4PER_MCSPI2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00f8)
+#define OMAP4_CM_L4PER_MCSPI3_CLKCTRL_OFFSET		0x0100
 #define OMAP4430_CM_L4PER_MCSPI3_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0100)
+#define OMAP4_CM_L4PER_MCSPI4_CLKCTRL_OFFSET		0x0108
 #define OMAP4430_CM_L4PER_MCSPI4_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0108)
+#define OMAP4_CM_L4PER_MMCSD3_CLKCTRL_OFFSET		0x0120
 #define OMAP4430_CM_L4PER_MMCSD3_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0120)
+#define OMAP4_CM_L4PER_MMCSD4_CLKCTRL_OFFSET		0x0128
 #define OMAP4430_CM_L4PER_MMCSD4_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0128)
+#define OMAP4_CM_L4PER_MSPROHG_CLKCTRL_OFFSET		0x0130
 #define OMAP4430_CM_L4PER_MSPROHG_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0130)
+#define OMAP4_CM_L4PER_SLIMBUS2_CLKCTRL_OFFSET		0x0138
 #define OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0138)
+#define OMAP4_CM_L4PER_UART1_CLKCTRL_OFFSET		0x0140
 #define OMAP4430_CM_L4PER_UART1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0140)
+#define OMAP4_CM_L4PER_UART2_CLKCTRL_OFFSET		0x0148
 #define OMAP4430_CM_L4PER_UART2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0148)
+#define OMAP4_CM_L4PER_UART3_CLKCTRL_OFFSET		0x0150
 #define OMAP4430_CM_L4PER_UART3_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0150)
+#define OMAP4_CM_L4PER_UART4_CLKCTRL_OFFSET		0x0158
 #define OMAP4430_CM_L4PER_UART4_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0158)
+#define OMAP4_CM_L4PER_MMCSD5_CLKCTRL_OFFSET		0x0160
 #define OMAP4430_CM_L4PER_MMCSD5_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0160)
+#define OMAP4_CM_L4PER_I2C5_CLKCTRL_OFFSET		0x0168
 #define OMAP4430_CM_L4PER_I2C5_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0168)
+#define OMAP4_CM_L4SEC_CLKSTCTRL_OFFSET			0x0180
 #define OMAP4430_CM_L4SEC_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0180)
+#define OMAP4_CM_L4SEC_STATICDEP_OFFSET			0x0184
 #define OMAP4430_CM_L4SEC_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0184)
+#define OMAP4_CM_L4SEC_DYNAMICDEP_OFFSET		0x0188
 #define OMAP4430_CM_L4SEC_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0188)
+#define OMAP4_CM_L4SEC_AES1_CLKCTRL_OFFSET		0x01a0
 #define OMAP4430_CM_L4SEC_AES1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01a0)
+#define OMAP4_CM_L4SEC_AES2_CLKCTRL_OFFSET		0x01a8
 #define OMAP4430_CM_L4SEC_AES2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01a8)
+#define OMAP4_CM_L4SEC_DES3DES_CLKCTRL_OFFSET		0x01b0
 #define OMAP4430_CM_L4SEC_DES3DES_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01b0)
+#define OMAP4_CM_L4SEC_PKAEIP29_CLKCTRL_OFFSET		0x01b8
 #define OMAP4430_CM_L4SEC_PKAEIP29_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01b8)
+#define OMAP4_CM_L4SEC_RNG_CLKCTRL_OFFSET		0x01c0
 #define OMAP4430_CM_L4SEC_RNG_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01c0)
+#define OMAP4_CM_L4SEC_SHA2MD51_CLKCTRL_OFFSET		0x01c8
 #define OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01c8)
+#define OMAP4_CM_L4SEC_CRYPTODMA_CLKCTRL_OFFSET		0x01d8
 #define OMAP4430_CM_L4SEC_CRYPTODMA_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01d8)
 
 /* CM2.CEFUSE_CM2 register offsets */
+#define OMAP4_CM_CEFUSE_CLKSTCTRL_OFFSET		0x0000
 #define OMAP4430_CM_CEFUSE_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CEFUSE_MOD, 0x0000)
+#define OMAP4_CM_CEFUSE_CEFUSE_CLKCTRL_OFFSET		0x0020
 #define OMAP4430_CM_CEFUSE_CEFUSE_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CEFUSE_MOD, 0x0020)
-
-/* CM2.RESTORE_CM2 register offsets */
-#define OMAP4430_CM_L3_1_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0000)
-#define OMAP4430_CM_L3_2_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0004)
-#define OMAP4430_CM_L4CFG_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0008)
-#define OMAP4430_CM_MEMIF_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x000c)
-#define OMAP4430_CM_L4PER_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0010)
-#define OMAP4430_CM_L3INIT_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0014)
-#define OMAP4430_CM_L3INSTR_L3_3_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0018)
-#define OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x001c)
-#define OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0020)
-#define OMAP4430_CM_L4PER_GPIO2_CLKCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0024)
-#define OMAP4430_CM_L4PER_GPIO3_CLKCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0028)
-#define OMAP4430_CM_L4PER_GPIO4_CLKCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x002c)
-#define OMAP4430_CM_L4PER_GPIO5_CLKCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0030)
-#define OMAP4430_CM_L4PER_GPIO6_CLKCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0034)
-#define OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0038)
-#define OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x003c)
-#define OMAP4430_CM_SDMA_STATICDEP_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0040)
 #endif
diff --git a/arch/arm/mach-omap2/cm4xxx.c b/arch/arm/mach-omap2/cm4xxx.c
index 4af76bb..b101091 100644
--- a/arch/arm/mach-omap2/cm4xxx.c
+++ b/arch/arm/mach-omap2/cm4xxx.c
@@ -21,35 +21,41 @@
 
 #include <asm/atomic.h>
 
+#include <plat/common.h>
+
 #include "cm.h"
-
-/* XXX move this to cm.h */
-/* MAX_MODULE_READY_TIME: max milliseconds for module to leave idle */
-#define MAX_MODULE_READY_TIME			20000
-
-/*
- * OMAP4_PRCM_CM_CLKCTRL_IDLEST_MASK: isolates the IDLEST field in the
- * CM_CLKCTRL register.
- */
-#define OMAP4_PRCM_CM_CLKCTRL_IDLEST_MASK	(0x2 << 16)
-
-/*
- * OMAP4 prcm_mod u32 fields contain packed data: the CM ID in bit 16 and
- * the PRCM module offset address (from the CM module base) in bits 15-0.
- */
-#define OMAP4_PRCM_MOD_CM_ID_SHIFT		16
-#define OMAP4_PRCM_MOD_OFFS_MASK		0xffff
+#include "cm-regbits-44xx.h"
 
 /**
- * omap4_cm_wait_idlest_ready - wait for a module to leave idle or standby
- * @prcm_mod: PRCM module offset (XXX example)
- * @prcm_dev_offs: PRCM device offset (e.g. MCASP XXX example)
+ * omap4_cm_wait_module_ready - wait for a module to be in 'func' state
+ * @clkctrl_reg: CLKCTRL module address
  *
- * XXX document
+ * Wait for the module IDLEST to be functional. If the module is in any of
+ * the non-functional states (trans, idle or disabled), the module and thus
+ * its sysconfig registers cannot be accessed; any attempt will probably
+ * lead to an "imprecise external abort".
+ *
+ * Module idle state:
+ *   0x0 func:     Module is fully functional, including OCP
+ *   0x1 trans:    Module is performing transition: wakeup, or sleep, or sleep
+ *                 abortion
+ *   0x2 idle:     Module is in Idle mode (only OCP part). It is functional if
+ *                 using separate functional clock
+ *   0x3 disabled: Module is disabled and cannot be accessed
+ *
+ * TODO: Need to handle the case where the module is accessible in idle state
  */
-int omap4_cm_wait_idlest_ready(u32 prcm_mod, u8 prcm_dev_offs)
+int omap4_cm_wait_module_ready(void __iomem *clkctrl_reg)
 {
-	/* FIXME: Add clock manager related code */
-	return 0;
+	int i = 0;
+
+	if (!clkctrl_reg)
+		return 0;
+
+	omap_test_timeout(((__raw_readl(clkctrl_reg) &
+			    OMAP4430_IDLEST_MASK) == 0),
+			  MAX_MODULE_READY_TIME, i);
+
+	return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
 }
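
A hypothetical caller of the new interface, combining OMAP44XX_CM2_REGADDR() with one of the _OFFSET macros added to cm44xx.h above, is sketched below; the module identifier OMAP4430_CM2_L3INIT_MOD is assumed to come from the prcm headers, and the real consumer is the omap_hwmod layer, which derives the CLKCTRL address from its own data tables.

#include "cm.h"			/* omap4_cm_wait_module_ready() */
#include "cm44xx.h"		/* OMAP44XX_CM2_REGADDR(), *_OFFSET macros */

/* Hypothetical example: wait for the MMC1 module behind CM2.L3INIT */
static int example_wait_for_mmc1(void)
{
	void __iomem *clkctrl;

	clkctrl = OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD,
				       OMAP4_CM_L3INIT_MMC1_CLKCTRL_OFFSET);

	/* 0 once IDLEST reads back as fully functional, -EBUSY on timeout */
	return omap4_cm_wait_module_ready(clkctrl);
}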
 
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index 43f8a33..a8d20ee 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -194,11 +194,12 @@
 	u32 offset = 0;
 	v_addr = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD_ROM);
 	if (prm_read_mod_reg(OMAP3430_GR_MOD, OMAP3_PRM_RSTST_OFFSET) &
-		OMAP3430_GLOBAL_COLD_RST) {
+	    OMAP3430_GLOBAL_COLD_RST_MASK) {
 		for ( ; offset <= max_offset; offset += 0x4)
 			__raw_writel(0x0, (v_addr + offset));
-		prm_set_mod_reg_bits(OMAP3430_GLOBAL_COLD_RST, OMAP3430_GR_MOD,
-			OMAP3_PRM_RSTST_OFFSET);
+		prm_set_mod_reg_bits(OMAP3430_GLOBAL_COLD_RST_MASK,
+				     OMAP3430_GR_MOD,
+				     OMAP3_PRM_RSTST_OFFSET);
 	}
 }
 
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 12154d1..03e6c9e 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -28,6 +28,7 @@
 #include <plat/mux.h>
 #include <mach/gpio.h>
 #include <plat/mmc.h>
+#include <plat/dma.h>
 
 #include "mux.h"
 
@@ -486,8 +487,10 @@
 }
 
 
-#ifdef CONFIG_OMAP_SHA1_MD5
-static struct resource sha1_md5_resources[] = {
+#if defined(CONFIG_CRYPTO_DEV_OMAP_SHAM) || defined(CONFIG_CRYPTO_DEV_OMAP_SHAM_MODULE)
+
+#ifdef CONFIG_ARCH_OMAP2
+static struct resource omap2_sham_resources[] = {
 	{
 		.start	= OMAP24XX_SEC_SHA1MD5_BASE,
 		.end	= OMAP24XX_SEC_SHA1MD5_BASE + 0x64,
@@ -498,20 +501,55 @@
 		.flags	= IORESOURCE_IRQ,
 	}
 };
+static int omap2_sham_resources_sz = ARRAY_SIZE(omap2_sham_resources);
+#else
+#define omap2_sham_resources		NULL
+#define omap2_sham_resources_sz		0
+#endif
 
-static struct platform_device sha1_md5_device = {
-	.name		= "OMAP SHA1/MD5",
+#ifdef CONFIG_ARCH_OMAP3
+static struct resource omap3_sham_resources[] = {
+	{
+		.start	= OMAP34XX_SEC_SHA1MD5_BASE,
+		.end	= OMAP34XX_SEC_SHA1MD5_BASE + 0x64,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_34XX_SHA1MD52_IRQ,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= OMAP34XX_DMA_SHA1MD5_RX,
+		.flags	= IORESOURCE_DMA,
+	}
+};
+static int omap3_sham_resources_sz = ARRAY_SIZE(omap3_sham_resources);
+#else
+#define omap3_sham_resources		NULL
+#define omap3_sham_resources_sz		0
+#endif
+
+static struct platform_device sham_device = {
+	.name		= "omap-sham",
 	.id		= -1,
-	.num_resources	= ARRAY_SIZE(sha1_md5_resources),
-	.resource	= sha1_md5_resources,
 };
 
-static void omap_init_sha1_md5(void)
+static void omap_init_sham(void)
 {
-	platform_device_register(&sha1_md5_device);
+	if (cpu_is_omap24xx()) {
+		sham_device.resource = omap2_sham_resources;
+		sham_device.num_resources = omap2_sham_resources_sz;
+	} else if (cpu_is_omap34xx()) {
+		sham_device.resource = omap3_sham_resources;
+		sham_device.num_resources = omap3_sham_resources_sz;
+	} else {
+		pr_err("%s: platform not supported\n", __func__);
+		return;
+	}
+	platform_device_register(&sham_device);
 }
 #else
-static inline void omap_init_sha1_md5(void) { }
+static inline void omap_init_sham(void) { }
 #endif
 
 /*-------------------------------------------------------------------------*/
@@ -624,6 +662,15 @@
 static inline void omap2_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
 			int controller_nr)
 {
+	if ((mmc_controller->slots[0].switch_pin > 0) &&
+	    (mmc_controller->slots[0].switch_pin < OMAP_MAX_GPIO_LINES))
+		omap_mux_init_gpio(mmc_controller->slots[0].switch_pin,
+				   OMAP_PIN_INPUT_PULLUP);
+	if ((mmc_controller->slots[0].gpio_wp > 0) &&
+	    (mmc_controller->slots[0].gpio_wp < OMAP_MAX_GPIO_LINES))
+		omap_mux_init_gpio(mmc_controller->slots[0].gpio_wp,
+				   OMAP_PIN_INPUT_PULLUP);
+
 	if (cpu_is_omap2420() && controller_nr == 0) {
 		omap_cfg_reg(H18_24XX_MMC_CMD);
 		omap_cfg_reg(H15_24XX_MMC_CLKI);
@@ -819,6 +866,33 @@
 static inline void omap_hdq_init(void) {}
 #endif
 
+/*---------------------------------------------------------------------------*/
+
+#if defined(CONFIG_VIDEO_OMAP2_VOUT) || \
+	defined(CONFIG_VIDEO_OMAP2_VOUT_MODULE)
+#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+static struct resource omap_vout_resource[3 - CONFIG_FB_OMAP2_NUM_FBS] = {
+};
+#else
+static struct resource omap_vout_resource[2] = {
+};
+#endif
+
+static struct platform_device omap_vout_device = {
+	.name		= "omap_vout",
+	.num_resources	= ARRAY_SIZE(omap_vout_resource),
+	.resource	= &omap_vout_resource[0],
+	.id		= -1,
+};
+static void omap_init_vout(void)
+{
+	if (platform_device_register(&omap_vout_device) < 0)
+		printk(KERN_ERR "Unable to register OMAP-VOUT device\n");
+}
+#else
+static inline void omap_init_vout(void) {}
+#endif
+
 /*-------------------------------------------------------------------------*/
 
 static int __init omap2_init_devices(void)
@@ -833,7 +907,8 @@
 	omap_init_pmu();
 	omap_hdq_init();
 	omap_init_sti();
-	omap_init_sha1_md5();
+	omap_init_sham();
+	omap_init_vout();
 
 	return 0;
 }
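
For context, the "omap-sham" device registered above is picked up through the standard platform-bus name match; the crypto driver itself lives under drivers/crypto/ and is not part of this hunk. An illustrative skeleton of the matching side (probe body intentionally empty):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Illustrative only: a real probe would ioremap the IORESOURCE_MEM entry
 * and claim the IRQ/DMA resources attached to the device above. */
static int omap_sham_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.driver	= {
		.name	= "omap-sham",	/* must match the platform device name */
		.owner	= THIS_MODULE,
	},
};

static int __init omap_sham_mod_init(void)
{
	return platform_driver_register(&omap_sham_driver);
}
module_init(omap_sham_mod_init);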
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index 9ad2295..1ef54b0 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -24,6 +24,7 @@
 
 static u16 control_pbias_offset;
 static u16 control_devconf1_offset;
+static u16 control_mmc1;
 
 #define HSMMC_NAME_LEN	9
 
@@ -42,7 +43,7 @@
 #define hsmmc_get_context_loss NULL
 #endif
 
-static void hsmmc1_before_set_reg(struct device *dev, int slot,
+static void omap_hsmmc1_before_set_reg(struct device *dev, int slot,
 				  int power_on, int vdd)
 {
 	u32 reg, prog_io;
@@ -95,7 +96,7 @@
 	}
 }
 
-static void hsmmc1_after_set_reg(struct device *dev, int slot,
+static void omap_hsmmc1_after_set_reg(struct device *dev, int slot,
 				 int power_on, int vdd)
 {
 	u32 reg;
@@ -119,6 +120,60 @@
 	}
 }
 
+static void omap4_hsmmc1_before_set_reg(struct device *dev, int slot,
+				  int power_on, int vdd)
+{
+	u32 reg;
+
+	/*
+	 * Assume we power both OMAP VMMC1 (for CMD, CLK, DAT0..3) and the
+	 * card with Vcc regulator (from twl4030 or whatever).  OMAP has both
+	 * 1.8V and 3.0V modes, controlled by the PBIAS register.
+	 *
+	 * In 8-bit modes, OMAP VMMC1A (for DAT4..7) needs a supply, which
+	 * is most naturally TWL VSIM; those pins also use PBIAS.
+	 *
+	 * FIXME handle VMMC1A as needed ...
+	 */
+	reg = omap_ctrl_readl(control_pbias_offset);
+	reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ | OMAP4_MMC1_PWRDNZ |
+					OMAP4_USBC1_ICUSB_PWRDNZ);
+	omap_ctrl_writel(reg, control_pbias_offset);
+}
+
+static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
+				 int power_on, int vdd)
+{
+	u32 reg;
+
+	if (power_on) {
+		reg = omap_ctrl_readl(control_pbias_offset);
+		reg |= OMAP4_MMC1_PBIASLITE_PWRDNZ;
+		if ((1 << vdd) <= MMC_VDD_165_195)
+			reg &= ~OMAP4_MMC1_PBIASLITE_VMODE;
+		else
+			reg |= OMAP4_MMC1_PBIASLITE_VMODE;
+		reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ | OMAP4_MMC1_PWRDNZ |
+						OMAP4_USBC1_ICUSB_PWRDNZ);
+		omap_ctrl_writel(reg, control_pbias_offset);
+		/* 4 microsecond delay for the comparator to generate an error */
+		udelay(4);
+		reg = omap_ctrl_readl(control_pbias_offset);
+		if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR) {
+			pr_err("Pbias Voltage is not same as LDO\n");
+			/* Caution: on VMODE_ERROR, power down the MMC I/O */
+			reg &= ~(OMAP4_MMC1_PWRDNZ | OMAP4_USBC1_ICUSB_PWRDNZ);
+			omap_ctrl_writel(reg, control_pbias_offset);
+		}
+	} else {
+		reg = omap_ctrl_readl(control_pbias_offset);
+		reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ |
+			OMAP4_MMC1_PBIASLITE_VMODE | OMAP4_MMC1_PWRDNZ |
+			OMAP4_USBC1_ICUSB_PWRDNZ);
+		omap_ctrl_writel(reg, control_pbias_offset);
+	}
+}
+
 static void hsmmc23_before_set_reg(struct device *dev, int slot,
 				   int power_on, int vdd)
 {
@@ -139,6 +194,12 @@
 	}
 }
 
+static int nop_mmc_set_power(struct device *dev, int slot, int power_on,
+							int vdd)
+{
+	return 0;
+}
+
 static struct omap_mmc_platform_data *hsmmc_data[OMAP34XX_NR_MMC] __initdata;
 
 void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
@@ -146,13 +207,28 @@
 	struct omap2_hsmmc_info *c;
 	int nr_hsmmc = ARRAY_SIZE(hsmmc_data);
 	int i;
+	u32 reg;
 
-	if (cpu_is_omap2430()) {
-		control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
-		control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
+	if (!cpu_is_omap44xx()) {
+		if (cpu_is_omap2430()) {
+			control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
+			control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
+		} else {
+			control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
+			control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
+		}
 	} else {
-		control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
-		control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
+		control_pbias_offset = OMAP44XX_CONTROL_PBIAS_LITE;
+		control_mmc1 = OMAP44XX_CONTROL_MMC1;
+		reg = omap_ctrl_readl(control_mmc1);
+		reg |= (OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP0 |
+			OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP1);
+		reg &= ~(OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP2 |
+			OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP3);
+		reg |= (OMAP4_CONTROL_SDMMC1_DR0_SPEEDCTRL |
+			OMAP4_CONTROL_SDMMC1_DR1_SPEEDCTRL |
+			OMAP4_CONTROL_SDMMC1_DR2_SPEEDCTRL);
+		omap_ctrl_writel(reg, control_mmc1);
 	}
 
 	for (c = controllers; c->mmc; c++) {
@@ -216,11 +292,27 @@
 		 */
 		mmc->slots[0].ocr_mask = c->ocr_mask;
 
+		if (cpu_is_omap3517() || cpu_is_omap3505())
+			mmc->slots[0].set_power = nop_mmc_set_power;
+		else
+			mmc->slots[0].features |= HSMMC_HAS_PBIAS;
+
 		switch (c->mmc) {
 		case 1:
-			/* on-chip level shifting via PBIAS0/PBIAS1 */
-			mmc->slots[0].before_set_reg = hsmmc1_before_set_reg;
-			mmc->slots[0].after_set_reg = hsmmc1_after_set_reg;
+			if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
+				/* on-chip level shifting via PBIAS0/PBIAS1 */
+				if (cpu_is_omap44xx()) {
+					mmc->slots[0].before_set_reg =
+						omap4_hsmmc1_before_set_reg;
+					mmc->slots[0].after_set_reg =
+						omap4_hsmmc1_after_set_reg;
+				} else {
+					mmc->slots[0].before_set_reg =
+						omap_hsmmc1_before_set_reg;
+					mmc->slots[0].after_set_reg =
+						omap_hsmmc1_after_set_reg;
+				}
+			}
 
 			/* Omap3630 HSMMC1 supports only 4-bit */
 			if (cpu_is_omap3630() && c->wires > 4) {
@@ -235,9 +327,11 @@
 				c->wires = 4;
 			/* FALLTHROUGH */
 		case 3:
-			/* off-chip level shifting, or none */
-			mmc->slots[0].before_set_reg = hsmmc23_before_set_reg;
-			mmc->slots[0].after_set_reg = NULL;
+			if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
+				/* off-chip level shifting, or none */
+				mmc->slots[0].before_set_reg = hsmmc23_before_set_reg;
+				mmc->slots[0].after_set_reg = NULL;
+			}
 			break;
 		default:
 			pr_err("MMC%d configuration not supported!\n", c->mmc);
diff --git a/arch/arm/mach-omap2/include/mach/am35xx.h b/arch/arm/mach-omap2/include/mach/am35xx.h
index a705f94..f1e13d1 100644
--- a/arch/arm/mach-omap2/include/mach/am35xx.h
+++ b/arch/arm/mach-omap2/include/mach/am35xx.h
@@ -23,4 +23,22 @@
 #define AM35XX_IPSS_HECC_BASE		0x5C050000
 #define AM35XX_IPSS_VPFE_BASE		0x5C060000
 
-#endif /*  __ASM_ARCH_AM35XX_H */
+
+/* HECC module specific offset definitions */
+#define AM35XX_HECC_SCC_HECC_OFFSET	(0x0)
+#define AM35XX_HECC_SCC_RAM_OFFSET	(0x3000)
+#define AM35XX_HECC_RAM_OFFSET		(0x3000)
+#define AM35XX_HECC_MBOX_OFFSET		(0x2000)
+#define AM35XX_HECC_INT_LINE		(0x0)
+#define AM35XX_HECC_VERSION		(0x1)
+
+#define AM35XX_EMAC_CNTRL_OFFSET	(0x10000)
+#define AM35XX_EMAC_CNTRL_MOD_OFFSET	(0x0)
+#define AM35XX_EMAC_CNTRL_RAM_OFFSET	(0x20000)
+#define AM35XX_EMAC_MDIO_OFFSET		(0x30000)
+#define AM35XX_EMAC_CNTRL_RAM_SIZE	(0x2000)
+#define AM35XX_EMAC_RAM_ADDR		(AM3517_EMAC_BASE + \
+						AM3517_EMAC_CNTRL_RAM_OFFSET)
+#define AM35XX_EMAC_HW_RAM_ADDR		(0x01E20000)
+
+#endif /* __ASM_ARCH_AM35XX_H */
diff --git a/arch/arm/mach-omap2/include/mach/debug-macro.S b/arch/arm/mach-omap2/include/mach/debug-macro.S
index 4a63a2e..35b2440 100644
--- a/arch/arm/mach-omap2/include/mach/debug-macro.S
+++ b/arch/arm/mach-omap2/include/mach/debug-macro.S
@@ -13,6 +13,8 @@
 
 #include <linux/serial_reg.h>
 
+#include <asm/memory.h>
+
 #include <plat/serial.h>
 
 #define UART_OFFSET(addr)	((addr) & 0x00ffffff)
@@ -40,13 +42,12 @@
 		cmp	\rx, #0			@ is port configured?
 		bne	99f			@ already configured
 
-		/* Check UART1 scratchpad register for uart to use */
+		/* Check the debug UART configuration set in uncompress.h */
 		mrc	p15, 0, \rx, c1, c0
 		tst	\rx, #1			@ MMU enabled?
-		moveq	\rx, #0x48000000	@ physical base address
-		movne	\rx, #0xfa000000	@ virtual base
-		orr	\rx, \rx, #0x0006a000	@ uart1 on omap2/3/4
-		ldrb	\rx, [\rx, #(UART_SCR << OMAP_PORT_SHIFT)] @ scratchpad
+		ldreq	\rx, =OMAP_UART_INFO
+		ldrne	\rx, =__phys_to_virt(OMAP_UART_INFO)
+		ldr	\rx, [\rx, #0]
 
 		/* Select the UART to use based on the UART1 scratchpad value */
 		cmp	\rx, #0			@ no port configured?
@@ -87,10 +88,10 @@
 		b	98f
 44:		mov	\rx, #UART_OFFSET(OMAP4_UART4_BASE)
 		b	98f
-95:		mov	\rx, #ZOOM_UART_BASE
+95:		ldr	\rx, =ZOOM_UART_BASE
 		ldr	\tmp, =omap_uart_phys
 		str	\rx, [\tmp, #0]
-		mov	\rx, #ZOOM_UART_VIRT
+		ldr	\rx, =ZOOM_UART_VIRT
 		ldr	\tmp, =omap_uart_virt
 		str	\rx, [\tmp, #0]
 		mov	\rx, #(UART_LSR << ZOOM_PORT_SHIFT)
diff --git a/arch/arm/mach-omap2/include/mach/omap4-common.h b/arch/arm/mach-omap2/include/mach/omap4-common.h
new file mode 100644
index 0000000..423af3a
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/omap4-common.h
@@ -0,0 +1,26 @@
+/*
+ * omap4-common.h: OMAP4 specific common header file
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * Author:
+ *	Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef OMAP_ARCH_OMAP4_COMMON_H
+#define OMAP_ARCH_OMAP4_COMMON_H
+
+#ifdef CONFIG_CACHE_L2X0
+extern void __iomem *l2cache_base;
+#endif
+
+extern void __iomem *gic_cpu_base_addr;
+extern void __iomem *gic_dist_base_addr;
+
+extern void __init gic_init_irq(void);
+extern void omap_smc1(u32 fn, u32 arg);
+
+#endif
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 87f676a..3cfb425 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -166,6 +166,15 @@
 		.length		= L4_EMU_34XX_SIZE,
 		.type		= MT_DEVICE
 	},
+#if defined(CONFIG_DEBUG_LL) &&							\
+	(defined(CONFIG_MACH_OMAP_ZOOM2) || defined(CONFIG_MACH_OMAP_ZOOM3))
+	{
+		.virtual	= ZOOM_UART_VIRT,
+		.pfn		= __phys_to_pfn(ZOOM_UART_BASE),
+		.length		= SZ_1M,
+		.type		= MT_DEVICE
+	},
+#endif
 };
 #endif
 #ifdef	CONFIG_ARCH_OMAP4
diff --git a/arch/arm/mach-omap2/iommu2.c b/arch/arm/mach-omap2/iommu2.c
index 4f63dc6..e82da68 100644
--- a/arch/arm/mach-omap2/iommu2.c
+++ b/arch/arm/mach-omap2/iommu2.c
@@ -147,6 +147,7 @@
 	printk("\n");
 
 	iommu_write_reg(obj, stat, MMU_IRQSTATUS);
+	omap2_iommu_disable(obj);
 	return stat;
 }
 
@@ -184,7 +185,7 @@
 	if (!cr)
 		return ERR_PTR(-ENOMEM);
 
-	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz;
+	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
 	cr->ram = e->pa | e->endian | e->elsz | e->mixed;
 
 	return cr;
@@ -212,7 +213,8 @@
 	char *p = buf;
 
 	/* FIXME: Need more detail analysis of cam/ram */
-	p += sprintf(p, "%08x %08x\n", cr->cam, cr->ram);
+	p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
+					(cr->cam & MMU_CAM_P) ? 1 : 0);
 
 	return p - buf;
 }
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
index 07aa7b3..2ff4dce 100644
--- a/arch/arm/mach-omap2/mux34xx.c
+++ b/arch/arm/mach-omap2/mux34xx.c
@@ -1901,26 +1901,15 @@
 	_OMAP3_BALLENTRY(GPMC_A8, "m3", "ab18"),
 	_OMAP3_BALLENTRY(GPMC_A9, "l3", "ac19"),
 	_OMAP3_BALLENTRY(GPMC_CLK, "t4", "w2"),
-	_OMAP3_BALLENTRY(GPMC_D0, "k1", "m2"),
-	_OMAP3_BALLENTRY(GPMC_D1, "l1", "m1"),
 	_OMAP3_BALLENTRY(GPMC_D10, "p1", "ab4"),
 	_OMAP3_BALLENTRY(GPMC_D11, "r1", "ac4"),
 	_OMAP3_BALLENTRY(GPMC_D12, "r2", "ab6"),
 	_OMAP3_BALLENTRY(GPMC_D13, "t2", "ac6"),
 	_OMAP3_BALLENTRY(GPMC_D14, "w1", "ab7"),
 	_OMAP3_BALLENTRY(GPMC_D15, "y1", "ac7"),
-	_OMAP3_BALLENTRY(GPMC_D2, "l2", "n2"),
-	_OMAP3_BALLENTRY(GPMC_D3, "p2", "n1"),
-	_OMAP3_BALLENTRY(GPMC_D4, "t1", "r2"),
-	_OMAP3_BALLENTRY(GPMC_D5, "v1", "r1"),
-	_OMAP3_BALLENTRY(GPMC_D6, "v2", "t2"),
-	_OMAP3_BALLENTRY(GPMC_D7, "w2", "t1"),
-	_OMAP3_BALLENTRY(GPMC_D8, "h2", "ab3"),
 	_OMAP3_BALLENTRY(GPMC_D9, "k2", "ac3"),
-	_OMAP3_BALLENTRY(GPMC_NADV_ALE, "f3", "w1"),
 	_OMAP3_BALLENTRY(GPMC_NBE0_CLE, "g3", "ac12"),
 	_OMAP3_BALLENTRY(GPMC_NBE1, "u3", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS0, "g4", "y2"),
 	_OMAP3_BALLENTRY(GPMC_NCS1, "h3", "y1"),
 	_OMAP3_BALLENTRY(GPMC_NCS2, "v8", NULL),
 	_OMAP3_BALLENTRY(GPMC_NCS3, "u8", NULL),
@@ -1928,10 +1917,7 @@
 	_OMAP3_BALLENTRY(GPMC_NCS5, "r8", NULL),
 	_OMAP3_BALLENTRY(GPMC_NCS6, "p8", NULL),
 	_OMAP3_BALLENTRY(GPMC_NCS7, "n8", NULL),
-	_OMAP3_BALLENTRY(GPMC_NOE, "g2", "v2"),
-	_OMAP3_BALLENTRY(GPMC_NWE, "f4", "v1"),
 	_OMAP3_BALLENTRY(GPMC_NWP, "h1", "ab10"),
-	_OMAP3_BALLENTRY(GPMC_WAIT0, "m8", "ab12"),
 	_OMAP3_BALLENTRY(GPMC_WAIT1, "l8", "ac10"),
 	_OMAP3_BALLENTRY(GPMC_WAIT2, "k8", NULL),
 	_OMAP3_BALLENTRY(GPMC_WAIT3, "j8", NULL),
@@ -1948,8 +1934,6 @@
 	_OMAP3_BALLENTRY(HSUSB0_DIR, "r28", NULL),
 	_OMAP3_BALLENTRY(HSUSB0_NXT, "t26", NULL),
 	_OMAP3_BALLENTRY(HSUSB0_STP, "t25", NULL),
-	_OMAP3_BALLENTRY(I2C1_SCL, "k21", NULL),
-	_OMAP3_BALLENTRY(I2C1_SDA, "j21", NULL),
 	_OMAP3_BALLENTRY(I2C2_SCL, "af15", NULL),
 	_OMAP3_BALLENTRY(I2C2_SDA, "ae15", NULL),
 	_OMAP3_BALLENTRY(I2C3_SCL, "af14", NULL),
@@ -1958,11 +1942,6 @@
 	_OMAP3_BALLENTRY(I2C4_SDA, "ae26", NULL),
 	_OMAP3_BALLENTRY(JTAG_EMU0, "aa11", NULL),
 	_OMAP3_BALLENTRY(JTAG_EMU1, "aa10", NULL),
-	_OMAP3_BALLENTRY(JTAG_RTCK, "aa12", NULL),
-	_OMAP3_BALLENTRY(JTAG_TCK, "aa13", NULL),
-	_OMAP3_BALLENTRY(JTAG_TDI, "aa20", NULL),
-	_OMAP3_BALLENTRY(JTAG_TDO, "aa19", NULL),
-	_OMAP3_BALLENTRY(JTAG_TMS_TMSC, "aa18", NULL),
 	_OMAP3_BALLENTRY(MCBSP1_CLKR, "y21", NULL),
 	_OMAP3_BALLENTRY(MCBSP1_CLKX, "w21", NULL),
 	_OMAP3_BALLENTRY(MCBSP1_DR, "u21", NULL),
@@ -2010,77 +1989,12 @@
 	_OMAP3_BALLENTRY(SDMMC2_DAT5, "ah3", NULL),
 	_OMAP3_BALLENTRY(SDMMC2_DAT6, "af3", NULL),
 	_OMAP3_BALLENTRY(SDMMC2_DAT7, "ae3", NULL),
-	_OMAP3_BALLENTRY(SDRC_A0, NULL, "n22"),
-	_OMAP3_BALLENTRY(SDRC_A1, NULL, "n23"),
-	_OMAP3_BALLENTRY(SDRC_A10, NULL, "v22"),
-	_OMAP3_BALLENTRY(SDRC_A11, NULL, "v23"),
-	_OMAP3_BALLENTRY(SDRC_A12, NULL, "w22"),
-	_OMAP3_BALLENTRY(SDRC_A13, NULL, "w23"),
-	_OMAP3_BALLENTRY(SDRC_A14, NULL, "y22"),
-	_OMAP3_BALLENTRY(SDRC_A2, NULL, "p22"),
-	_OMAP3_BALLENTRY(SDRC_A3, NULL, "p23"),
-	_OMAP3_BALLENTRY(SDRC_A4, NULL, "r22"),
-	_OMAP3_BALLENTRY(SDRC_A5, NULL, "r23"),
-	_OMAP3_BALLENTRY(SDRC_A6, NULL, "t22"),
-	_OMAP3_BALLENTRY(SDRC_A7, NULL, "t23"),
-	_OMAP3_BALLENTRY(SDRC_A8, NULL, "u22"),
-	_OMAP3_BALLENTRY(SDRC_A9, NULL, "u23"),
-	_OMAP3_BALLENTRY(SDRC_BA0, "h9", "ab21"),
-	_OMAP3_BALLENTRY(SDRC_BA1, "h10", "ac21"),
 	_OMAP3_BALLENTRY(SDRC_CKE0, "h16", "j22"),
 	_OMAP3_BALLENTRY(SDRC_CKE1, "h17", "j23"),
-	_OMAP3_BALLENTRY(SDRC_CLK, "a13", "a11"),
-	_OMAP3_BALLENTRY(SDRC_D0, NULL, "j2"),
-	_OMAP3_BALLENTRY(SDRC_D1, NULL, "j1"),
-	_OMAP3_BALLENTRY(SDRC_D10, "c15", "b14"),
-	_OMAP3_BALLENTRY(SDRC_D11, "b16", "a14"),
-	_OMAP3_BALLENTRY(SDRC_D12, "d17", "b16"),
-	_OMAP3_BALLENTRY(SDRC_D13, "c17", "a16"),
-	_OMAP3_BALLENTRY(SDRC_D14, "b17", "b19"),
-	_OMAP3_BALLENTRY(SDRC_D15, "d18", "a19"),
-	_OMAP3_BALLENTRY(SDRC_D16, NULL, "b3"),
-	_OMAP3_BALLENTRY(SDRC_D17, NULL, "a3"),
-	_OMAP3_BALLENTRY(SDRC_D18, NULL, "b5"),
-	_OMAP3_BALLENTRY(SDRC_D19, NULL, "a5"),
-	_OMAP3_BALLENTRY(SDRC_D2, NULL, "g2"),
-	_OMAP3_BALLENTRY(SDRC_D20, NULL, "b8"),
-	_OMAP3_BALLENTRY(SDRC_D21, NULL, "a8"),
-	_OMAP3_BALLENTRY(SDRC_D22, NULL, "b9"),
-	_OMAP3_BALLENTRY(SDRC_D23, NULL, "a9"),
-	_OMAP3_BALLENTRY(SDRC_D24, NULL, "b21"),
-	_OMAP3_BALLENTRY(SDRC_D25, NULL, "a21"),
-	_OMAP3_BALLENTRY(SDRC_D26, NULL, "d22"),
-	_OMAP3_BALLENTRY(SDRC_D27, NULL, "d23"),
-	_OMAP3_BALLENTRY(SDRC_D28, NULL, "e22"),
-	_OMAP3_BALLENTRY(SDRC_D29, NULL, "e23"),
-	_OMAP3_BALLENTRY(SDRC_D3, NULL, "g1"),
-	_OMAP3_BALLENTRY(SDRC_D30, NULL, "g22"),
-	_OMAP3_BALLENTRY(SDRC_D31, NULL, "g23"),
-	_OMAP3_BALLENTRY(SDRC_D4, NULL, "f2"),
-	_OMAP3_BALLENTRY(SDRC_D5, NULL, "f1"),
-	_OMAP3_BALLENTRY(SDRC_D6, NULL, "d2"),
-	_OMAP3_BALLENTRY(SDRC_D7, NULL, "d1"),
-	_OMAP3_BALLENTRY(SDRC_D8, "c14", "b13"),
-	_OMAP3_BALLENTRY(SDRC_D9, "b14", "a13"),
-	_OMAP3_BALLENTRY(SDRC_DM0, NULL, "c1"),
-	_OMAP3_BALLENTRY(SDRC_DM1, "a16", "a17"),
-	_OMAP3_BALLENTRY(SDRC_DM2, NULL, "a6"),
-	_OMAP3_BALLENTRY(SDRC_DM3, NULL, "a20"),
-	_OMAP3_BALLENTRY(SDRC_DQS0, NULL, "c2"),
-	_OMAP3_BALLENTRY(SDRC_DQS1, "a17", "b17"),
-	_OMAP3_BALLENTRY(SDRC_DQS2, NULL, "b6"),
-	_OMAP3_BALLENTRY(SDRC_DQS3, NULL, "b20"),
-	_OMAP3_BALLENTRY(SDRC_NCAS, "h13", "l22"),
-	_OMAP3_BALLENTRY(SDRC_NCLK, "a14", "b11"),
-	_OMAP3_BALLENTRY(SDRC_NCS0, "h11", "m22"),
-	_OMAP3_BALLENTRY(SDRC_NCS1, "h12", "m23"),
-	_OMAP3_BALLENTRY(SDRC_NRAS, "h14", "l23"),
-	_OMAP3_BALLENTRY(SDRC_NWE, "h15", "k23"),
 	_OMAP3_BALLENTRY(SIM_CLK, "p26", NULL),
 	_OMAP3_BALLENTRY(SIM_IO, "p27", NULL),
 	_OMAP3_BALLENTRY(SIM_PWRCTRL, "r27", NULL),
 	_OMAP3_BALLENTRY(SIM_RST, "r25", NULL),
-	_OMAP3_BALLENTRY(SYS_32K, "ae25", NULL),
 	_OMAP3_BALLENTRY(SYS_BOOT0, "ah26", NULL),
 	_OMAP3_BALLENTRY(SYS_BOOT1, "ag26", NULL),
 	_OMAP3_BALLENTRY(SYS_BOOT2, "ae14", NULL),
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
new file mode 100644
index 0000000..eb9bee7
--- /dev/null
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -0,0 +1,157 @@
+/*
+ * omap iommu: omap device registration
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+
+#include <plat/iommu.h>
+#include <plat/irqs.h>
+
+struct iommu_device {
+	resource_size_t base;
+	int irq;
+	struct iommu_platform_data pdata;
+	struct resource res[2];
+};
+static struct iommu_device *devices;
+static int num_iommu_devices;
+
+#ifdef CONFIG_ARCH_OMAP3
+static struct iommu_device omap3_devices[] = {
+	{
+		.base = 0x480bd400,
+		.irq = 24,
+		.pdata = {
+			.name = "isp",
+			.nr_tlb_entries = 8,
+			.clk_name = "cam_ick",
+		},
+	},
+#if defined(CONFIG_MPU_BRIDGE_IOMMU)
+	{
+		.base = 0x5d000000,
+		.irq = 28,
+		.pdata = {
+			.name = "iva2",
+			.nr_tlb_entries = 32,
+			.clk_name = "iva2_ck",
+		},
+	},
+#endif
+};
+#define NR_OMAP3_IOMMU_DEVICES ARRAY_SIZE(omap3_devices)
+static struct platform_device *omap3_iommu_pdev[NR_OMAP3_IOMMU_DEVICES];
+#else
+#define omap3_devices		NULL
+#define NR_OMAP3_IOMMU_DEVICES	0
+#define omap3_iommu_pdev	NULL
+#endif
+
+#ifdef CONFIG_ARCH_OMAP4
+static struct iommu_device omap4_devices[] = {
+	{
+		.base = OMAP4_MMU1_BASE,
+		.irq = INT_44XX_DUCATI_MMU_IRQ,
+		.pdata = {
+			.name = "ducati",
+			.nr_tlb_entries = 32,
+			.clk_name = "ducati_ick",
+		},
+	},
+#if defined(CONFIG_MPU_TESLA_IOMMU)
+	{
+		.base = OMAP4_MMU2_BASE,
+		.irq = INT_44XX_DSP_MMU,
+		.pdata = {
+			.name = "tesla",
+			.nr_tlb_entries = 32,
+			.clk_name = "tesla_ick",
+		},
+	},
+#endif
+};
+#define NR_OMAP4_IOMMU_DEVICES ARRAY_SIZE(omap4_devices)
+static struct platform_device *omap4_iommu_pdev[NR_OMAP4_IOMMU_DEVICES];
+#else
+#define omap4_devices		NULL
+#define NR_OMAP4_IOMMU_DEVICES	0
+#define omap4_iommu_pdev	NULL
+#endif
+
+static struct platform_device **omap_iommu_pdev;
+
+static int __init omap_iommu_init(void)
+{
+	int i, err;
+	struct resource res[] = {
+		{ .flags = IORESOURCE_MEM },
+		{ .flags = IORESOURCE_IRQ },
+	};
+
+	if (cpu_is_omap34xx()) {
+		devices = omap3_devices;
+		omap_iommu_pdev = omap3_iommu_pdev;
+		num_iommu_devices = NR_OMAP3_IOMMU_DEVICES;
+	} else if (cpu_is_omap44xx()) {
+		devices = omap4_devices;
+		omap_iommu_pdev = omap4_iommu_pdev;
+		num_iommu_devices = NR_OMAP4_IOMMU_DEVICES;
+	} else
+		return -ENODEV;
+
+	for (i = 0; i < num_iommu_devices; i++) {
+		struct platform_device *pdev;
+		const struct iommu_device *d = &devices[i];
+
+		pdev = platform_device_alloc("omap-iommu", i);
+		if (!pdev) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+
+		res[0].start = d->base;
+		res[0].end = d->base + MMU_REG_SIZE - 1;
+		res[1].start = res[1].end = d->irq;
+
+		err = platform_device_add_resources(pdev, res,
+						    ARRAY_SIZE(res));
+		if (err)
+			goto err_out;
+		err = platform_device_add_data(pdev, &d->pdata,
+					       sizeof(d->pdata));
+		if (err)
+			goto err_out;
+		err = platform_device_add(pdev);
+		if (err)
+			goto err_out;
+		omap_iommu_pdev[i] = pdev;
+	}
+	return 0;
+
+err_out:
+	while (i--)
+		platform_device_put(omap_iommu_pdev[i]);
+	return err;
+}
+module_init(omap_iommu_init);
+
+static void __exit omap_iommu_exit(void)
+{
+	int i;
+
+	for (i = 0; i < num_iommu_devices; i++)
+		platform_device_unregister(omap_iommu_pdev[i]);
+}
+module_exit(omap_iommu_exit);
+
+MODULE_AUTHOR("Hiroshi DOYU");
+MODULE_DESCRIPTION("omap iommu: omap device registration");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 38153e5..1cf5231 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -24,7 +24,7 @@
 #include <asm/localtimer.h>
 #include <asm/smp_scu.h>
 #include <mach/hardware.h>
-#include <plat/common.h>
+#include <mach/omap4-common.h>
 
 /* SCU base address */
 static void __iomem *scu_base;
diff --git a/arch/arm/mach-omap2/omap3-iommu.c b/arch/arm/mach-omap2/omap3-iommu.c
deleted file mode 100644
index fbbcb5c..0000000
--- a/arch/arm/mach-omap2/omap3-iommu.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * omap iommu: omap3 device registration
- *
- * Copyright (C) 2008-2009 Nokia Corporation
- *
- * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/platform_device.h>
-
-#include <plat/iommu.h>
-
-struct iommu_device {
-	resource_size_t base;
-	int irq;
-	struct iommu_platform_data pdata;
-	struct resource res[2];
-};
-
-static struct iommu_device devices[] = {
-	{
-		.base = 0x480bd400,
-		.irq = 24,
-		.pdata = {
-			.name = "isp",
-			.nr_tlb_entries = 8,
-			.clk_name = "cam_ick",
-		},
-	},
-#if defined(CONFIG_MPU_BRIDGE_IOMMU)
-	{
-		.base = 0x5d000000,
-		.irq = 28,
-		.pdata = {
-			.name = "iva2",
-			.nr_tlb_entries = 32,
-			.clk_name = "iva2_ck",
-		},
-	},
-#endif
-};
-#define NR_IOMMU_DEVICES ARRAY_SIZE(devices)
-
-static struct platform_device *omap3_iommu_pdev[NR_IOMMU_DEVICES];
-
-static int __init omap3_iommu_init(void)
-{
-	int i, err;
-	struct resource res[] = {
-		{ .flags = IORESOURCE_MEM },
-		{ .flags = IORESOURCE_IRQ },
-	};
-
-	for (i = 0; i < NR_IOMMU_DEVICES; i++) {
-		struct platform_device *pdev;
-		const struct iommu_device *d = &devices[i];
-
-		pdev = platform_device_alloc("omap-iommu", i);
-		if (!pdev) {
-			err = -ENOMEM;
-			goto err_out;
-		}
-
-		res[0].start = d->base;
-		res[0].end = d->base + MMU_REG_SIZE - 1;
-		res[1].start = res[1].end = d->irq;
-
-		err = platform_device_add_resources(pdev, res,
-						    ARRAY_SIZE(res));
-		if (err)
-			goto err_out;
-		err = platform_device_add_data(pdev, &d->pdata,
-					       sizeof(d->pdata));
-		if (err)
-			goto err_out;
-		err = platform_device_add(pdev);
-		if (err)
-			goto err_out;
-		omap3_iommu_pdev[i] = pdev;
-	}
-	return 0;
-
-err_out:
-	while (i--)
-		platform_device_put(omap3_iommu_pdev[i]);
-	return err;
-}
-module_init(omap3_iommu_init);
-
-static void __exit omap3_iommu_exit(void)
-{
-	int i;
-
-	for (i = 0; i < NR_IOMMU_DEVICES; i++)
-		platform_device_unregister(omap3_iommu_pdev[i]);
-}
-module_exit(omap3_iommu_exit);
-
-MODULE_AUTHOR("Hiroshi DOYU");
-MODULE_DESCRIPTION("omap iommu: omap3 device registration");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
new file mode 100644
index 0000000..13dc979
--- /dev/null
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -0,0 +1,72 @@
+/*
+ * OMAP4 specific common source file.
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Author:
+ *	Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware/gic.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#include <mach/hardware.h>
+#include <mach/omap4-common.h>
+
+#ifdef CONFIG_CACHE_L2X0
+void __iomem *l2cache_base;
+#endif
+
+void __iomem *gic_cpu_base_addr;
+void __iomem *gic_dist_base_addr;
+
+void __init gic_init_irq(void)
+{
+	/* Static mapping, never released */
+	gic_dist_base_addr = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
+	BUG_ON(!gic_dist_base_addr);
+	gic_dist_init(0, gic_dist_base_addr, 29);
+
+	/* Static mapping, never released */
+	gic_cpu_base_addr = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
+	BUG_ON(!gic_cpu_base_addr);
+	gic_cpu_init(0, gic_cpu_base_addr);
+}
+
+#ifdef CONFIG_CACHE_L2X0
+static int __init omap_l2_cache_init(void)
+{
+	/*
+	 * Avoid running this code on other OMAPs in
+	 * multi-OMAP builds.
+	 */
+	if (!cpu_is_omap44xx())
+		return -ENODEV;
+
+	/* Static mapping, never released */
+	l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
+	BUG_ON(!l2cache_base);
+
+	/* Enable PL310 L2 Cache controller */
+	omap_smc1(0x102, 0x1);
+
+	/*
+	 * 32KB way size, 16-way associativity,
+	 * parity disabled
+	 */
+	l2x0_init(l2cache_base, 0x0e050000, 0xc0000fff);
+
+	return 0;
+}
+early_initcall(omap_l2_cache_init);
+#endif
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 2c12e8c..95c9a5f 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2,12 +2,12 @@
  * omap_hwmod implementation for OMAP2/3/4
  *
  * Copyright (C) 2009 Nokia Corporation
- * Paul Walmsley
- * With fixes and testing from Kevin Hilman
  *
- * Created in collaboration with (alphabetical order): Benoit Cousson,
- * Kevin Hilman, Tony Lindgren, Rajendra Nayak, Vikram Pandita, Sakari
- * Poussa, Anand Sawant, Santosh Shilimkar, Richard Woodruff
+ * Paul Walmsley, Benoît Cousson, Kevin Hilman
+ *
+ * Created in collaboration with (alphabetical order): Thara Gopinath,
+ * Tony Lindgren, Rajendra Nayak, Vikram Pandita, Sakari Poussa, Anand
+ * Sawant, Santosh Shilimkar, Richard Woodruff
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -57,7 +57,7 @@
 #define MAX_MODULE_RESET_WAIT		10000
 
 /* Name of the OMAP hwmod for the MPU */
-#define MPU_INITIATOR_NAME		"mpu_hwmod"
+#define MPU_INITIATOR_NAME		"mpu"
 
 /* omap_hwmod_list contains all registered struct omap_hwmods */
 static LIST_HEAD(omap_hwmod_list);
@@ -403,21 +403,20 @@
  */
 static int _init_main_clk(struct omap_hwmod *oh)
 {
-	struct clk *c;
 	int ret = 0;
 
 	if (!oh->main_clk)
 		return 0;
 
-	c = omap_clk_get_by_name(oh->main_clk);
-	WARN(IS_ERR(c), "omap_hwmod: %s: cannot clk_get main_clk %s\n",
-	     oh->name, oh->main_clk);
-	if (IS_ERR(c))
-		ret = -EINVAL;
-	oh->_clk = c;
+	oh->_clk = omap_clk_get_by_name(oh->main_clk);
+	if (!oh->_clk) {
+		pr_warning("omap_hwmod: %s: cannot clk_get main_clk %s\n",
+			   oh->name, oh->main_clk);
+		return -EINVAL;
+	}
 
-	WARN(!c->clkdm, "omap_hwmod: %s: missing clockdomain for %s.\n",
-	     oh->main_clk, c->name);
+	if (!oh->_clk->clkdm)
+		pr_warning("omap_hwmod: %s: missing clockdomain for %s.\n",
+			   oh->main_clk, oh->_clk->name);
 
 	return ret;
 }
@@ -431,7 +430,6 @@
  */
 static int _init_interface_clks(struct omap_hwmod *oh)
 {
-	struct omap_hwmod_ocp_if *os;
 	struct clk *c;
 	int i;
 	int ret = 0;
@@ -439,14 +437,16 @@
 	if (oh->slaves_cnt == 0)
 		return 0;
 
-	for (i = 0, os = *oh->slaves; i < oh->slaves_cnt; i++, os++) {
+	for (i = 0; i < oh->slaves_cnt; i++) {
+		struct omap_hwmod_ocp_if *os = oh->slaves[i];
+
 		if (!os->clk)
 			continue;
 
 		c = omap_clk_get_by_name(os->clk);
-		WARN(IS_ERR(c), "omap_hwmod: %s: cannot clk_get "
-		     "interface_clk %s\n", oh->name, os->clk);
-		if (IS_ERR(c))
+		if (!c) {
+			pr_warning("omap_hwmod: %s: cannot clk_get interface_clk %s\n",
+				   oh->name, os->clk);
 			ret = -EINVAL;
+		}
 		os->_clk = c;
 	}
@@ -470,9 +470,9 @@
 
 	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) {
 		c = omap_clk_get_by_name(oc->clk);
-		WARN(IS_ERR(c), "omap_hwmod: %s: cannot clk_get opt_clk "
-		     "%s\n", oh->name, oc->clk);
-		if (IS_ERR(c))
+		if (!c) {
+			pr_warning("omap_hwmod: %s: cannot clk_get opt_clk %s\n",
+				   oh->name, oc->clk);
 			ret = -EINVAL;
+		}
 		oc->_clk = c;
 	}
@@ -489,19 +489,19 @@
  */
 static int _enable_clocks(struct omap_hwmod *oh)
 {
-	struct omap_hwmod_ocp_if *os;
 	int i;
 
 	pr_debug("omap_hwmod: %s: enabling clocks\n", oh->name);
 
-	if (oh->_clk && !IS_ERR(oh->_clk))
+	if (oh->_clk)
 		clk_enable(oh->_clk);
 
 	if (oh->slaves_cnt > 0) {
-		for (i = 0, os = *oh->slaves; i < oh->slaves_cnt; i++, os++) {
+		for (i = 0; i < oh->slaves_cnt; i++) {
+			struct omap_hwmod_ocp_if *os = oh->slaves[i];
 			struct clk *c = os->_clk;
 
-			if (c && !IS_ERR(c) && (os->flags & OCPIF_SWSUP_IDLE))
+			if (c && (os->flags & OCPIF_SWSUP_IDLE))
 				clk_enable(c);
 		}
 	}
@@ -519,19 +519,19 @@
  */
 static int _disable_clocks(struct omap_hwmod *oh)
 {
-	struct omap_hwmod_ocp_if *os;
 	int i;
 
 	pr_debug("omap_hwmod: %s: disabling clocks\n", oh->name);
 
-	if (oh->_clk && !IS_ERR(oh->_clk))
+	if (oh->_clk)
 		clk_disable(oh->_clk);
 
 	if (oh->slaves_cnt > 0) {
-		for (i = 0, os = *oh->slaves; i < oh->slaves_cnt; i++, os++) {
+		for (i = 0; i < oh->slaves_cnt; i++) {
+			struct omap_hwmod_ocp_if *os = oh->slaves[i];
 			struct clk *c = os->_clk;
 
-			if (c && !IS_ERR(c) && (os->flags & OCPIF_SWSUP_IDLE))
+			if (c && (os->flags & OCPIF_SWSUP_IDLE))
 				clk_disable(c);
 		}
 	}
@@ -550,14 +550,15 @@
  */
 static int _find_mpu_port_index(struct omap_hwmod *oh)
 {
-	struct omap_hwmod_ocp_if *os;
 	int i;
 	int found = 0;
 
 	if (!oh || oh->slaves_cnt == 0)
 		return -EINVAL;
 
-	for (i = 0, os = *oh->slaves; i < oh->slaves_cnt; i++, os++) {
+	for (i = 0; i < oh->slaves_cnt; i++) {
+		struct omap_hwmod_ocp_if *os = oh->slaves[i];
+
 		if (os->user & OCP_USER_MPU) {
 			found = 1;
 			break;
@@ -592,7 +593,7 @@
 	if (!oh || oh->slaves_cnt == 0)
 		return NULL;
 
-	os = *oh->slaves + index;
+	os = oh->slaves[index];
 
 	for (i = 0, mem = os->addr; i < os->addr_cnt; i++, mem++) {
 		if (mem->flags & ADDR_TYPE_RT) {
@@ -780,9 +781,10 @@
 	ret |= _init_interface_clks(oh);
 	ret |= _init_opt_clks(oh);
 
-	oh->_state = _HWMOD_STATE_CLKS_INITED;
+	if (!ret)
+		oh->_state = _HWMOD_STATE_CLKS_INITED;
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -805,9 +807,9 @@
 	if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
 		return 0;
 
-	os = *oh->slaves + oh->_mpu_port_index;
+	os = oh->slaves[oh->_mpu_port_index];
 
-	if (!(os->flags & OCPIF_HAS_IDLEST))
+	if (oh->flags & HWMOD_NO_IDLEST)
 		return 0;
 
 	/* XXX check module SIDLEMODE */
@@ -818,11 +820,8 @@
 		ret = omap2_cm_wait_module_ready(oh->prcm.omap2.module_offs,
 						 oh->prcm.omap2.idlest_reg_id,
 						 oh->prcm.omap2.idlest_idle_bit);
-#if 0
 	} else if (cpu_is_omap44xx()) {
-		ret = omap4_cm_wait_module_ready(oh->prcm.omap4.module_offs,
-						 oh->prcm.omap4.device_offs);
-#endif
+		ret = omap4_cm_wait_module_ready(oh->prcm.omap4.clkctrl_reg);
 	} else {
 		BUG();
 	};
@@ -911,16 +910,21 @@
 	_add_initiator_dep(oh, mpu_oh);
 	_enable_clocks(oh);
 
-	if (oh->class->sysc) {
-		if (!(oh->_int_flags & _HWMOD_SYSCONFIG_LOADED))
-			_update_sysc_cache(oh);
-		_sysc_enable(oh);
-	}
-
 	r = _wait_target_ready(oh);
-	if (!r)
+	if (!r) {
 		oh->_state = _HWMOD_STATE_ENABLED;
 
+		/* Access the sysconfig only if the target is ready */
+		if (oh->class->sysc) {
+			if (!(oh->_int_flags & _HWMOD_SYSCONFIG_LOADED))
+				_update_sysc_cache(oh);
+			_sysc_enable(oh);
+		}
+	} else {
+		pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
+			 oh->name, r);
+	}
+
 	return r;
 }
 
@@ -997,18 +1001,18 @@
  */
 static int _setup(struct omap_hwmod *oh)
 {
-	struct omap_hwmod_ocp_if *os;
-	int i;
+	int i, r;
 
 	if (!oh)
 		return -EINVAL;
 
 	/* Set iclk autoidle mode */
 	if (oh->slaves_cnt > 0) {
-		for (i = 0, os = *oh->slaves; i < oh->slaves_cnt; i++, os++) {
+		for (i = 0; i < oh->slaves_cnt; i++) {
+			struct omap_hwmod_ocp_if *os = oh->slaves[i];
 			struct clk *c = os->_clk;
 
-			if (!c || IS_ERR(c))
+			if (!c)
 				continue;
 
 			if (os->flags & OCPIF_SWSUP_IDLE) {
@@ -1022,7 +1026,12 @@
 
 	oh->_state = _HWMOD_STATE_INITIALIZED;
 
-	_enable(oh);
+	r = _enable(oh);
+	if (r) {
+		pr_warning("omap_hwmod: %s: cannot be enabled (%d)\n",
+			   oh->name, oh->_state);
+		return 0;
+	}
 
 	if (!(oh->flags & HWMOD_INIT_NO_RESET)) {
 		/*
@@ -1430,7 +1439,7 @@
 	ret = oh->mpu_irqs_cnt + oh->sdma_chs_cnt;
 
 	for (i = 0; i < oh->slaves_cnt; i++)
-		ret += (*oh->slaves + i)->addr_cnt;
+		ret += oh->slaves[i]->addr_cnt;
 
 	return ret;
 }
@@ -1471,7 +1480,7 @@
 	for (i = 0; i < oh->slaves_cnt; i++) {
 		struct omap_hwmod_ocp_if *os;
 
-		os = *oh->slaves + i;
+		os = oh->slaves[i];
 
 		for (j = 0; j < os->addr_cnt; j++) {
 			(res + r)->start = (os->addr + j)->pa_start;
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index eb7ee24..e5530c5 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -125,7 +125,7 @@
 
 /* MPU */
 static struct omap_hwmod omap2420_mpu_hwmod = {
-	.name		= "mpu_hwmod",
+	.name		= "mpu",
 	.class		= &mpu_hwmod_class,
 	.main_clk	= "mpu_ck",
 	.masters	= omap2420_mpu_masters,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index 241bd82..0852d95 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -127,7 +127,7 @@
 
 /* MPU */
 static struct omap_hwmod omap2430_mpu_hwmod = {
-	.name		= "mpu_hwmod",
+	.name		= "mpu",
 	.class		= &mpu_hwmod_class,
 	.main_clk	= "mpu_ck",
 	.masters	= omap2430_mpu_masters,
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index ed60840..39b0c0e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -156,7 +156,7 @@
 
 /* MPU */
 static struct omap_hwmod omap3xxx_mpu_hwmod = {
-	.name		= "mpu_hwmod",
+	.name		= "mpu",
 	.class		= &mpu_hwmod_class,
 	.main_clk	= "arm_fck",
 	.masters	= omap3xxx_mpu_masters,
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 6cac981..723b44e 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -548,6 +548,9 @@
 {
 	u32 *option = data;
 
+	if (option == &wakeup_timer_milliseconds && val >= 1000)
+		return -EINVAL;
+
 	*option = val;
 
 	if (option == &enable_off_mode)
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index bd6466a..3de6ece 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -43,6 +43,7 @@
 extern int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state);
 
 extern u32 wakeup_timer_seconds;
+extern u32 wakeup_timer_milliseconds;
 extern struct omap_dm_timer *gptimer_wakeup;
 
 #ifdef CONFIG_PM_DEBUG
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index 374299e..e321281 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -70,8 +70,8 @@
 	f2 = cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
 
 	/* Ignore UART clocks.  These are handled by UART core (serial.c) */
-	f1 &= ~(OMAP24XX_EN_UART1 | OMAP24XX_EN_UART2);
-	f2 &= ~OMAP24XX_EN_UART3;
+	f1 &= ~(OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_UART2_MASK);
+	f2 &= ~OMAP24XX_EN_UART3_MASK;
 
 	if (f1 | f2)
 		return 1;
@@ -107,7 +107,7 @@
 	l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL;
 	omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0);
 
-	omap2_gpio_prepare_for_retention();
+	omap2_gpio_prepare_for_idle(PWRDM_POWER_RET);
 
 	if (omap2_pm_debug) {
 		omap2_pm_dump(0, 0, 0);
@@ -141,7 +141,7 @@
 		tmp = timespec_to_ns(&ts_idle) * NSEC_PER_USEC;
 		omap2_pm_dump(0, 1, tmp);
 	}
-	omap2_gpio_resume_after_retention();
+	omap2_gpio_resume_after_idle();
 
 	clk_enable(osc_ck);
 
@@ -170,7 +170,7 @@
 	u32 l;
 
 	l = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
-	return l & (OMAP2420_EN_I2C2 | OMAP2420_EN_I2C1);
+	return l & (OMAP2420_EN_I2C2_MASK | OMAP2420_EN_I2C1_MASK);
 }
 
 static int sti_console_enabled;
@@ -181,13 +181,13 @@
 
 	/* Check for MMC, UART2, UART1, McSPI2, McSPI1 and DSS1. */
 	l = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
-	if (l & (OMAP2420_EN_MMC | OMAP24XX_EN_UART2 |
-		 OMAP24XX_EN_UART1 | OMAP24XX_EN_MCSPI2 |
-		 OMAP24XX_EN_MCSPI1 | OMAP24XX_EN_DSS1))
+	if (l & (OMAP2420_EN_MMC_MASK | OMAP24XX_EN_UART2_MASK |
+		 OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_MCSPI2_MASK |
+		 OMAP24XX_EN_MCSPI1_MASK | OMAP24XX_EN_DSS1_MASK))
 		return 0;
 	/* Check for UART3. */
 	l = cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
-	if (l & OMAP24XX_EN_UART3)
+	if (l & OMAP24XX_EN_UART3_MASK)
 		return 0;
 	if (sti_console_enabled)
 		return 0;
@@ -215,12 +215,12 @@
 
 		/* Try to enter MPU retention */
 		prm_write_mod_reg((0x01 << OMAP_POWERSTATE_SHIFT) |
-				  OMAP_LOGICRETSTATE,
+				  OMAP_LOGICRETSTATE_MASK,
 				  MPU_MOD, OMAP2_PM_PWSTCTRL);
 	} else {
 		/* Block MPU retention */
 
-		prm_write_mod_reg(OMAP_LOGICRETSTATE, MPU_MOD,
+		prm_write_mod_reg(OMAP_LOGICRETSTATE_MASK, MPU_MOD,
 						 OMAP2_PM_PWSTCTRL);
 		only_idle = 1;
 	}
@@ -288,7 +288,8 @@
 	u32 wken_wkup, mir1;
 
 	wken_wkup = prm_read_mod_reg(WKUP_MOD, PM_WKEN);
-	prm_write_mod_reg(wken_wkup & ~OMAP24XX_EN_GPT1, WKUP_MOD, PM_WKEN);
+	wken_wkup &= ~OMAP24XX_EN_GPT1_MASK;
+	prm_write_mod_reg(wken_wkup, WKUP_MOD, PM_WKEN);
 
 	/* Mask GPT1 */
 	mir1 = omap_readl(0x480fe0a4);
@@ -351,7 +352,7 @@
 	struct powerdomain *pwrdm;
 
 	/* Enable autoidle */
-	prm_write_mod_reg(OMAP24XX_AUTOIDLE, OCP_MOD,
+	prm_write_mod_reg(OMAP24XX_AUTOIDLE_MASK, OCP_MOD,
 			  OMAP2_PRCM_SYSCONFIG_OFFSET);
 
 	/*
@@ -390,53 +391,54 @@
 	clkdm_add_wkdep(mpu_clkdm, wkup_clkdm);
 
 	/* Enable clock autoidle for all domains */
-	cm_write_mod_reg(OMAP24XX_AUTO_CAM |
-			 OMAP24XX_AUTO_MAILBOXES |
-			 OMAP24XX_AUTO_WDT4 |
-			 OMAP2420_AUTO_WDT3 |
-			 OMAP24XX_AUTO_MSPRO |
-			 OMAP2420_AUTO_MMC |
-			 OMAP24XX_AUTO_FAC |
-			 OMAP2420_AUTO_EAC |
-			 OMAP24XX_AUTO_HDQ |
-			 OMAP24XX_AUTO_UART2 |
-			 OMAP24XX_AUTO_UART1 |
-			 OMAP24XX_AUTO_I2C2 |
-			 OMAP24XX_AUTO_I2C1 |
-			 OMAP24XX_AUTO_MCSPI2 |
-			 OMAP24XX_AUTO_MCSPI1 |
-			 OMAP24XX_AUTO_MCBSP2 |
-			 OMAP24XX_AUTO_MCBSP1 |
-			 OMAP24XX_AUTO_GPT12 |
-			 OMAP24XX_AUTO_GPT11 |
-			 OMAP24XX_AUTO_GPT10 |
-			 OMAP24XX_AUTO_GPT9 |
-			 OMAP24XX_AUTO_GPT8 |
-			 OMAP24XX_AUTO_GPT7 |
-			 OMAP24XX_AUTO_GPT6 |
-			 OMAP24XX_AUTO_GPT5 |
-			 OMAP24XX_AUTO_GPT4 |
-			 OMAP24XX_AUTO_GPT3 |
-			 OMAP24XX_AUTO_GPT2 |
-			 OMAP2420_AUTO_VLYNQ |
-			 OMAP24XX_AUTO_DSS,
+	cm_write_mod_reg(OMAP24XX_AUTO_CAM_MASK |
+			 OMAP24XX_AUTO_MAILBOXES_MASK |
+			 OMAP24XX_AUTO_WDT4_MASK |
+			 OMAP2420_AUTO_WDT3_MASK |
+			 OMAP24XX_AUTO_MSPRO_MASK |
+			 OMAP2420_AUTO_MMC_MASK |
+			 OMAP24XX_AUTO_FAC_MASK |
+			 OMAP2420_AUTO_EAC_MASK |
+			 OMAP24XX_AUTO_HDQ_MASK |
+			 OMAP24XX_AUTO_UART2_MASK |
+			 OMAP24XX_AUTO_UART1_MASK |
+			 OMAP24XX_AUTO_I2C2_MASK |
+			 OMAP24XX_AUTO_I2C1_MASK |
+			 OMAP24XX_AUTO_MCSPI2_MASK |
+			 OMAP24XX_AUTO_MCSPI1_MASK |
+			 OMAP24XX_AUTO_MCBSP2_MASK |
+			 OMAP24XX_AUTO_MCBSP1_MASK |
+			 OMAP24XX_AUTO_GPT12_MASK |
+			 OMAP24XX_AUTO_GPT11_MASK |
+			 OMAP24XX_AUTO_GPT10_MASK |
+			 OMAP24XX_AUTO_GPT9_MASK |
+			 OMAP24XX_AUTO_GPT8_MASK |
+			 OMAP24XX_AUTO_GPT7_MASK |
+			 OMAP24XX_AUTO_GPT6_MASK |
+			 OMAP24XX_AUTO_GPT5_MASK |
+			 OMAP24XX_AUTO_GPT4_MASK |
+			 OMAP24XX_AUTO_GPT3_MASK |
+			 OMAP24XX_AUTO_GPT2_MASK |
+			 OMAP2420_AUTO_VLYNQ_MASK |
+			 OMAP24XX_AUTO_DSS_MASK,
 			 CORE_MOD, CM_AUTOIDLE1);
-	cm_write_mod_reg(OMAP24XX_AUTO_UART3 |
-			 OMAP24XX_AUTO_SSI |
-			 OMAP24XX_AUTO_USB,
+	cm_write_mod_reg(OMAP24XX_AUTO_UART3_MASK |
+			 OMAP24XX_AUTO_SSI_MASK |
+			 OMAP24XX_AUTO_USB_MASK,
 			 CORE_MOD, CM_AUTOIDLE2);
-	cm_write_mod_reg(OMAP24XX_AUTO_SDRC |
-			 OMAP24XX_AUTO_GPMC |
-			 OMAP24XX_AUTO_SDMA,
+	cm_write_mod_reg(OMAP24XX_AUTO_SDRC_MASK |
+			 OMAP24XX_AUTO_GPMC_MASK |
+			 OMAP24XX_AUTO_SDMA_MASK,
 			 CORE_MOD, CM_AUTOIDLE3);
-	cm_write_mod_reg(OMAP24XX_AUTO_PKA |
-			 OMAP24XX_AUTO_AES |
-			 OMAP24XX_AUTO_RNG |
-			 OMAP24XX_AUTO_SHA |
-			 OMAP24XX_AUTO_DES,
+	cm_write_mod_reg(OMAP24XX_AUTO_PKA_MASK |
+			 OMAP24XX_AUTO_AES_MASK |
+			 OMAP24XX_AUTO_RNG_MASK |
+			 OMAP24XX_AUTO_SHA_MASK |
+			 OMAP24XX_AUTO_DES_MASK,
 			 CORE_MOD, OMAP24XX_CM_AUTOIDLE4);
 
-	cm_write_mod_reg(OMAP2420_AUTO_DSP_IPI, OMAP24XX_DSP_MOD, CM_AUTOIDLE);
+	cm_write_mod_reg(OMAP2420_AUTO_DSP_IPI_MASK, OMAP24XX_DSP_MOD,
+			 CM_AUTOIDLE);
 
 	/* Put DPLL and both APLLs into autoidle mode */
 	cm_write_mod_reg((0x03 << OMAP24XX_AUTO_DPLL_SHIFT) |
@@ -444,12 +446,12 @@
 			 (0x03 << OMAP24XX_AUTO_54M_SHIFT),
 			 PLL_MOD, CM_AUTOIDLE);
 
-	cm_write_mod_reg(OMAP24XX_AUTO_OMAPCTRL |
-			 OMAP24XX_AUTO_WDT1 |
-			 OMAP24XX_AUTO_MPU_WDT |
-			 OMAP24XX_AUTO_GPIOS |
-			 OMAP24XX_AUTO_32KSYNC |
-			 OMAP24XX_AUTO_GPT1,
+	cm_write_mod_reg(OMAP24XX_AUTO_OMAPCTRL_MASK |
+			 OMAP24XX_AUTO_WDT1_MASK |
+			 OMAP24XX_AUTO_MPU_WDT_MASK |
+			 OMAP24XX_AUTO_GPIOS_MASK |
+			 OMAP24XX_AUTO_32KSYNC_MASK |
+			 OMAP24XX_AUTO_GPT1_MASK,
 			 WKUP_MOD, CM_AUTOIDLE);
 
 	/* REVISIT: Configure number of 32 kHz clock cycles for sys_clk
@@ -460,15 +462,15 @@
 	/* Configure automatic voltage transition */
 	prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
 			  OMAP2_PRCM_VOLTSETUP_OFFSET);
-	prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT |
+	prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT_MASK |
 			  (0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) |
-			  OMAP24XX_MEMRETCTRL |
+			  OMAP24XX_MEMRETCTRL_MASK |
 			  (0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) |
 			  (0x0 << OMAP24XX_VOLT_LEVEL_SHIFT),
 			  OMAP24XX_GR_MOD, OMAP2_PRCM_VOLTCTRL_OFFSET);
 
 	/* Enable wake-up events */
-	prm_write_mod_reg(OMAP24XX_EN_GPIOS | OMAP24XX_EN_GPT1,
+	prm_write_mod_reg(OMAP24XX_EN_GPIOS_MASK | OMAP24XX_EN_GPT1_MASK,
 			  WKUP_MOD, PM_WKEN);
 }
 
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index ea0000b..2e96771 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -58,6 +58,7 @@
 u32 enable_off_mode;
 u32 sleep_while_idle;
 u32 wakeup_timer_seconds;
+u32 wakeup_timer_milliseconds;
 
 struct power_state {
 	struct powerdomain *pwrdm;
@@ -93,19 +94,20 @@
 	int timeout = 0;
 
 	if (omap_rev() >= OMAP3430_REV_ES3_1) {
-		prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN, WKUP_MOD, PM_WKEN);
+		prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
+				     PM_WKEN);
 		/* Do a readback to assure write has been done */
 		prm_read_mod_reg(WKUP_MOD, PM_WKEN);
 
 		while (!(prm_read_mod_reg(WKUP_MOD, PM_WKST) &
-			 OMAP3430_ST_IO_CHAIN)) {
+			 OMAP3430_ST_IO_CHAIN_MASK)) {
 			timeout++;
 			if (timeout > 1000) {
 				printk(KERN_ERR "Wake up daisy chain "
 				       "activation failed.\n");
 				return;
 			}
-			prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN,
+			prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
 					     WKUP_MOD, PM_WKST);
 		}
 	}
@@ -114,7 +116,8 @@
 static void omap3_disable_io_chain(void)
 {
 	if (omap_rev() >= OMAP3430_REV_ES3_1)
-		prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN, WKUP_MOD, PM_WKEN);
+		prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
+				       PM_WKEN);
 }
 
 static void omap3_core_save_context(void)
@@ -267,14 +270,18 @@
  */
 static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
 {
-	u32 irqstatus_mpu;
+	u32 irqenable_mpu, irqstatus_mpu;
 	int c = 0;
 
-	do {
-		irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
-					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+	irqenable_mpu = prm_read_mod_reg(OCP_MOD,
+					 OMAP3_PRM_IRQENABLE_MPU_OFFSET);
+	irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
+					 OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+	irqstatus_mpu &= irqenable_mpu;
 
-		if (irqstatus_mpu & (OMAP3430_WKUP_ST | OMAP3430_IO_ST)) {
+	do {
+		if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK |
+				     OMAP3430_IO_ST_MASK)) {
 			c = _prcm_int_handle_wakeup();
 
 			/*
@@ -292,7 +299,11 @@
 		prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
 					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
 
-	} while (prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET));
+		irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
+					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+		irqstatus_mpu &= irqenable_mpu;
+
+	} while (irqstatus_mpu);
 
 	return IRQ_HANDLED;
 }
@@ -371,12 +382,19 @@
 	if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
 		pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);
 
-	/* PER */
+	/* Enable IO-PAD and IO-CHAIN wakeups */
 	per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
 	core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
+	if (per_next_state < PWRDM_POWER_ON ||
+			core_next_state < PWRDM_POWER_ON) {
+		prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
+		omap3_enable_io_chain();
+	}
+
+	/* PER */
 	if (per_next_state < PWRDM_POWER_ON) {
 		omap_uart_prepare_idle(2);
-		omap2_gpio_prepare_for_retention();
+		omap2_gpio_prepare_for_idle(per_next_state);
 		if (per_next_state == PWRDM_POWER_OFF) {
 			if (core_next_state == PWRDM_POWER_ON) {
 				per_next_state = PWRDM_POWER_RET;
@@ -398,10 +416,8 @@
 			omap3_core_save_context();
 			omap3_prcm_save_context();
 		}
-		/* Enable IO-PAD and IO-CHAIN wakeups */
-		prm_set_mod_reg_bits(OMAP3430_EN_IO, WKUP_MOD, PM_WKEN);
-		omap3_enable_io_chain();
 	}
+
 	omap3_intc_prepare_idle();
 
 	/*
@@ -445,7 +461,7 @@
 		omap_uart_resume_idle(0);
 		omap_uart_resume_idle(1);
 		if (core_next_state == PWRDM_POWER_OFF)
-			prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF,
+			prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
 					       OMAP3430_GR_MOD,
 					       OMAP3_PRM_VOLTCTRL_OFFSET);
 	}
@@ -454,9 +470,9 @@
 	/* PER */
 	if (per_next_state < PWRDM_POWER_ON) {
 		per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm);
+		omap2_gpio_resume_after_idle();
 		if (per_prev_state == PWRDM_POWER_OFF)
 			omap3_per_restore_context();
-		omap2_gpio_resume_after_retention();
 		omap_uart_resume_idle(2);
 		if (per_state_modified)
 			pwrdm_set_next_pwrst(per_pwrdm, PWRDM_POWER_OFF);
@@ -464,7 +480,7 @@
 
 	/* Disable IO-PAD and IO-CHAIN wakeup */
 	if (core_next_state < PWRDM_POWER_ON) {
-		prm_clear_mod_reg_bits(OMAP3430_EN_IO, WKUP_MOD, PM_WKEN);
+		prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
 		omap3_disable_io_chain();
 	}
 
@@ -548,20 +564,21 @@
 #ifdef CONFIG_SUSPEND
 static suspend_state_t suspend_state;
 
-static void omap2_pm_wakeup_on_timer(u32 seconds)
+static void omap2_pm_wakeup_on_timer(u32 seconds, u32 milliseconds)
 {
 	u32 tick_rate, cycles;
 
-	if (!seconds)
+	if (!seconds && !milliseconds)
 		return;
 
 	tick_rate = clk_get_rate(omap_dm_timer_get_fclk(gptimer_wakeup));
-	cycles = tick_rate * seconds;
+	cycles = tick_rate * seconds + tick_rate * milliseconds / 1000;
 	omap_dm_timer_stop(gptimer_wakeup);
 	omap_dm_timer_set_load_start(gptimer_wakeup, 0, 0xffffffff - cycles);
 
-	pr_info("PM: Resume timer in %d secs (%d ticks at %d ticks/sec.)\n",
-		seconds, cycles, tick_rate);
+	pr_info("PM: Resume timer in %u.%03u secs"
+		" (%d ticks at %d ticks/sec.)\n",
+		seconds, milliseconds, cycles, tick_rate);
 }
 
 static int omap3_pm_prepare(void)
@@ -575,8 +592,9 @@
 	struct power_state *pwrst;
 	int state, ret = 0;
 
-	if (wakeup_timer_seconds)
-		omap2_pm_wakeup_on_timer(wakeup_timer_seconds);
+	if (wakeup_timer_seconds || wakeup_timer_milliseconds)
+		omap2_pm_wakeup_on_timer(wakeup_timer_seconds,
+					 wakeup_timer_milliseconds);
 
 	/* Read current next_pwrsts */
 	list_for_each_entry(pwrst, &pwrst_list, node)
@@ -683,9 +701,9 @@
 		return;
 
 	/* Reset IVA2 */
-	prm_write_mod_reg(OMAP3430_RST1_IVA2 |
-			  OMAP3430_RST2_IVA2 |
-			  OMAP3430_RST3_IVA2,
+	prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
+			  OMAP3430_RST2_IVA2_MASK |
+			  OMAP3430_RST3_IVA2_MASK,
 			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 
 	/* Enable IVA2 clock */
@@ -703,9 +721,9 @@
 	cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
 
 	/* Reset IVA2 */
-	prm_write_mod_reg(OMAP3430_RST1_IVA2 |
-			  OMAP3430_RST2_IVA2 |
-			  OMAP3430_RST3_IVA2,
+	prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
+			  OMAP3430_RST2_IVA2_MASK |
+			  OMAP3430_RST3_IVA2_MASK,
 			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 }
 
@@ -727,8 +745,8 @@
 	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);
 
 	/* reset modem */
-	prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON |
-			  OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST,
+	prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
+			  OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
 			  CORE_MOD, OMAP2_RM_RSTCTRL);
 	prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
 }
@@ -754,102 +772,102 @@
 	 * Note that in the long run this should be done by clockfw
 	 */
 	cm_write_mod_reg(
-		OMAP3430_AUTO_MODEM |
-		OMAP3430ES2_AUTO_MMC3 |
-		OMAP3430ES2_AUTO_ICR |
-		OMAP3430_AUTO_AES2 |
-		OMAP3430_AUTO_SHA12 |
-		OMAP3430_AUTO_DES2 |
-		OMAP3430_AUTO_MMC2 |
-		OMAP3430_AUTO_MMC1 |
-		OMAP3430_AUTO_MSPRO |
-		OMAP3430_AUTO_HDQ |
-		OMAP3430_AUTO_MCSPI4 |
-		OMAP3430_AUTO_MCSPI3 |
-		OMAP3430_AUTO_MCSPI2 |
-		OMAP3430_AUTO_MCSPI1 |
-		OMAP3430_AUTO_I2C3 |
-		OMAP3430_AUTO_I2C2 |
-		OMAP3430_AUTO_I2C1 |
-		OMAP3430_AUTO_UART2 |
-		OMAP3430_AUTO_UART1 |
-		OMAP3430_AUTO_GPT11 |
-		OMAP3430_AUTO_GPT10 |
-		OMAP3430_AUTO_MCBSP5 |
-		OMAP3430_AUTO_MCBSP1 |
-		OMAP3430ES1_AUTO_FAC | /* This is es1 only */
-		OMAP3430_AUTO_MAILBOXES |
-		OMAP3430_AUTO_OMAPCTRL |
-		OMAP3430ES1_AUTO_FSHOSTUSB |
-		OMAP3430_AUTO_HSOTGUSB |
-		OMAP3430_AUTO_SAD2D |
-		OMAP3430_AUTO_SSI,
+		OMAP3430_AUTO_MODEM_MASK |
+		OMAP3430ES2_AUTO_MMC3_MASK |
+		OMAP3430ES2_AUTO_ICR_MASK |
+		OMAP3430_AUTO_AES2_MASK |
+		OMAP3430_AUTO_SHA12_MASK |
+		OMAP3430_AUTO_DES2_MASK |
+		OMAP3430_AUTO_MMC2_MASK |
+		OMAP3430_AUTO_MMC1_MASK |
+		OMAP3430_AUTO_MSPRO_MASK |
+		OMAP3430_AUTO_HDQ_MASK |
+		OMAP3430_AUTO_MCSPI4_MASK |
+		OMAP3430_AUTO_MCSPI3_MASK |
+		OMAP3430_AUTO_MCSPI2_MASK |
+		OMAP3430_AUTO_MCSPI1_MASK |
+		OMAP3430_AUTO_I2C3_MASK |
+		OMAP3430_AUTO_I2C2_MASK |
+		OMAP3430_AUTO_I2C1_MASK |
+		OMAP3430_AUTO_UART2_MASK |
+		OMAP3430_AUTO_UART1_MASK |
+		OMAP3430_AUTO_GPT11_MASK |
+		OMAP3430_AUTO_GPT10_MASK |
+		OMAP3430_AUTO_MCBSP5_MASK |
+		OMAP3430_AUTO_MCBSP1_MASK |
+		OMAP3430ES1_AUTO_FAC_MASK | /* This is es1 only */
+		OMAP3430_AUTO_MAILBOXES_MASK |
+		OMAP3430_AUTO_OMAPCTRL_MASK |
+		OMAP3430ES1_AUTO_FSHOSTUSB_MASK |
+		OMAP3430_AUTO_HSOTGUSB_MASK |
+		OMAP3430_AUTO_SAD2D_MASK |
+		OMAP3430_AUTO_SSI_MASK,
 		CORE_MOD, CM_AUTOIDLE1);
 
 	cm_write_mod_reg(
-		OMAP3430_AUTO_PKA |
-		OMAP3430_AUTO_AES1 |
-		OMAP3430_AUTO_RNG |
-		OMAP3430_AUTO_SHA11 |
-		OMAP3430_AUTO_DES1,
+		OMAP3430_AUTO_PKA_MASK |
+		OMAP3430_AUTO_AES1_MASK |
+		OMAP3430_AUTO_RNG_MASK |
+		OMAP3430_AUTO_SHA11_MASK |
+		OMAP3430_AUTO_DES1_MASK,
 		CORE_MOD, CM_AUTOIDLE2);
 
 	if (omap_rev() > OMAP3430_REV_ES1_0) {
 		cm_write_mod_reg(
-			OMAP3430_AUTO_MAD2D |
-			OMAP3430ES2_AUTO_USBTLL,
+			OMAP3430_AUTO_MAD2D_MASK |
+			OMAP3430ES2_AUTO_USBTLL_MASK,
 			CORE_MOD, CM_AUTOIDLE3);
 	}
 
 	cm_write_mod_reg(
-		OMAP3430_AUTO_WDT2 |
-		OMAP3430_AUTO_WDT1 |
-		OMAP3430_AUTO_GPIO1 |
-		OMAP3430_AUTO_32KSYNC |
-		OMAP3430_AUTO_GPT12 |
-		OMAP3430_AUTO_GPT1 ,
+		OMAP3430_AUTO_WDT2_MASK |
+		OMAP3430_AUTO_WDT1_MASK |
+		OMAP3430_AUTO_GPIO1_MASK |
+		OMAP3430_AUTO_32KSYNC_MASK |
+		OMAP3430_AUTO_GPT12_MASK |
+		OMAP3430_AUTO_GPT1_MASK,
 		WKUP_MOD, CM_AUTOIDLE);
 
 	cm_write_mod_reg(
-		OMAP3430_AUTO_DSS,
+		OMAP3430_AUTO_DSS_MASK,
 		OMAP3430_DSS_MOD,
 		CM_AUTOIDLE);
 
 	cm_write_mod_reg(
-		OMAP3430_AUTO_CAM,
+		OMAP3430_AUTO_CAM_MASK,
 		OMAP3430_CAM_MOD,
 		CM_AUTOIDLE);
 
 	cm_write_mod_reg(
-		OMAP3430_AUTO_GPIO6 |
-		OMAP3430_AUTO_GPIO5 |
-		OMAP3430_AUTO_GPIO4 |
-		OMAP3430_AUTO_GPIO3 |
-		OMAP3430_AUTO_GPIO2 |
-		OMAP3430_AUTO_WDT3 |
-		OMAP3430_AUTO_UART3 |
-		OMAP3430_AUTO_GPT9 |
-		OMAP3430_AUTO_GPT8 |
-		OMAP3430_AUTO_GPT7 |
-		OMAP3430_AUTO_GPT6 |
-		OMAP3430_AUTO_GPT5 |
-		OMAP3430_AUTO_GPT4 |
-		OMAP3430_AUTO_GPT3 |
-		OMAP3430_AUTO_GPT2 |
-		OMAP3430_AUTO_MCBSP4 |
-		OMAP3430_AUTO_MCBSP3 |
-		OMAP3430_AUTO_MCBSP2,
+		OMAP3430_AUTO_GPIO6_MASK |
+		OMAP3430_AUTO_GPIO5_MASK |
+		OMAP3430_AUTO_GPIO4_MASK |
+		OMAP3430_AUTO_GPIO3_MASK |
+		OMAP3430_AUTO_GPIO2_MASK |
+		OMAP3430_AUTO_WDT3_MASK |
+		OMAP3430_AUTO_UART3_MASK |
+		OMAP3430_AUTO_GPT9_MASK |
+		OMAP3430_AUTO_GPT8_MASK |
+		OMAP3430_AUTO_GPT7_MASK |
+		OMAP3430_AUTO_GPT6_MASK |
+		OMAP3430_AUTO_GPT5_MASK |
+		OMAP3430_AUTO_GPT4_MASK |
+		OMAP3430_AUTO_GPT3_MASK |
+		OMAP3430_AUTO_GPT2_MASK |
+		OMAP3430_AUTO_MCBSP4_MASK |
+		OMAP3430_AUTO_MCBSP3_MASK |
+		OMAP3430_AUTO_MCBSP2_MASK,
 		OMAP3430_PER_MOD,
 		CM_AUTOIDLE);
 
 	if (omap_rev() > OMAP3430_REV_ES1_0) {
 		cm_write_mod_reg(
-			OMAP3430ES2_AUTO_USBHOST,
+			OMAP3430ES2_AUTO_USBHOST_MASK,
 			OMAP3430ES2_USBHOST_MOD,
 			CM_AUTOIDLE);
 	}
 
-	omap_ctrl_writel(OMAP3430_AUTOIDLE, OMAP2_CONTROL_SYSCONFIG);
+	omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);
 
 	/*
 	 * Set all plls to autoidle. This is needed until autoidle is
@@ -879,35 +897,40 @@
 			     OMAP3_PRM_CLKSRC_CTRL_OFFSET);
 
 	/* setup wakup source */
-	prm_write_mod_reg(OMAP3430_EN_IO | OMAP3430_EN_GPIO1 |
-			  OMAP3430_EN_GPT1 | OMAP3430_EN_GPT12,
+	prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
+			  OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
 			  WKUP_MOD, PM_WKEN);
 	/* No need to write EN_IO, that is always enabled */
-	prm_write_mod_reg(OMAP3430_EN_GPIO1 | OMAP3430_EN_GPT1 |
-			  OMAP3430_EN_GPT12,
+	prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
+			  OMAP3430_GRPSEL_GPT1_MASK |
+			  OMAP3430_GRPSEL_GPT12_MASK,
 			  WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
 	/* For some reason IO doesn't generate wakeup event even if
 	 * it is selected to mpu wakeup goup */
-	prm_write_mod_reg(OMAP3430_IO_EN | OMAP3430_WKUP_EN,
+	prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
 			  OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
 
 	/* Enable PM_WKEN to support DSS LPR */
-	prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS,
+	prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
 				OMAP3430_DSS_MOD, PM_WKEN);
 
 	/* Enable wakeups in PER */
-	prm_write_mod_reg(OMAP3430_EN_GPIO2 | OMAP3430_EN_GPIO3 |
-			  OMAP3430_EN_GPIO4 | OMAP3430_EN_GPIO5 |
-			  OMAP3430_EN_GPIO6 | OMAP3430_EN_UART3 |
-			  OMAP3430_EN_MCBSP2 | OMAP3430_EN_MCBSP3 |
-			  OMAP3430_EN_MCBSP4,
+	prm_write_mod_reg(OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
+			  OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
+			  OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
+			  OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK |
+			  OMAP3430_EN_MCBSP4_MASK,
 			  OMAP3430_PER_MOD, PM_WKEN);
 	/* and allow them to wake up MPU */
-	prm_write_mod_reg(OMAP3430_GRPSEL_GPIO2 | OMAP3430_EN_GPIO3 |
-			  OMAP3430_GRPSEL_GPIO4 | OMAP3430_EN_GPIO5 |
-			  OMAP3430_GRPSEL_GPIO6 | OMAP3430_EN_UART3 |
-			  OMAP3430_EN_MCBSP2 | OMAP3430_EN_MCBSP3 |
-			  OMAP3430_EN_MCBSP4,
+	prm_write_mod_reg(OMAP3430_GRPSEL_GPIO2_MASK |
+			  OMAP3430_GRPSEL_GPIO3_MASK |
+			  OMAP3430_GRPSEL_GPIO4_MASK |
+			  OMAP3430_GRPSEL_GPIO5_MASK |
+			  OMAP3430_GRPSEL_GPIO6_MASK |
+			  OMAP3430_GRPSEL_UART3_MASK |
+			  OMAP3430_GRPSEL_MCBSP2_MASK |
+			  OMAP3430_GRPSEL_MCBSP3_MASK |
+			  OMAP3430_GRPSEL_MCBSP4_MASK,
 			  OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
 
 	/* Don't attach IVA interrupts */
@@ -1080,14 +1103,6 @@
 	omap3_idle_init();
 
 	clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
-	/*
-	 * REVISIT: This wkdep is only necessary when GPIO2-6 are enabled for
-	 * IO-pad wakeup.  Otherwise it will unnecessarily waste power
-	 * waking up PER with every CORE wakeup - see
-	 * http://marc.info/?l=linux-omap&m=121852150710062&w=2
-	*/
-	clkdm_add_wkdep(per_clkdm, core_clkdm);
-
 	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
 		omap3_secure_ram_storage =
 			kmalloc(0x803F, GFP_KERNEL);
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index ebfce7d..a2904aa 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -5,8 +5,8 @@
  * Copyright (C) 2007-2009 Nokia Corporation
  *
  * Written by Paul Walmsley
- *
  * Added OMAP4 specific support by Abhijit Pagare <abhijitpagare@ti.com>
+ * State counting code by Tero Kristo <tero.kristo@nokia.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -64,10 +64,10 @@
 #define OMAP_MEM4_ONSTATE_MASK OMAP4430_OCP_NRET_BANK_ONSTATE_MASK
 
 /* OMAP3 and OMAP4 Memory Retstate Masks (common across all power domains) */
-#define OMAP_MEM0_RETSTATE_MASK OMAP3430_SHAREDL1CACHEFLATRETSTATE
-#define OMAP_MEM1_RETSTATE_MASK OMAP3430_L1FLATMEMRETSTATE
-#define OMAP_MEM2_RETSTATE_MASK OMAP3430_SHAREDL2CACHEFLATRETSTATE
-#define OMAP_MEM3_RETSTATE_MASK OMAP3430_L2FLATMEMRETSTATE
+#define OMAP_MEM0_RETSTATE_MASK OMAP3430_SHAREDL1CACHEFLATRETSTATE_MASK
+#define OMAP_MEM1_RETSTATE_MASK OMAP3430_L1FLATMEMRETSTATE_MASK
+#define OMAP_MEM2_RETSTATE_MASK OMAP3430_SHAREDL2CACHEFLATRETSTATE_MASK
+#define OMAP_MEM3_RETSTATE_MASK OMAP3430_L2FLATMEMRETSTATE_MASK
 #define OMAP_MEM4_RETSTATE_MASK OMAP4430_OCP_NRET_BANK_RETSTATE_MASK
 
 /* OMAP3 and OMAP4 Memory Status bits */
@@ -511,6 +511,8 @@
  */
 int pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst)
 {
+	u32 v;
+
 	if (!pwrdm)
 		return -EINVAL;
 
@@ -526,9 +528,9 @@
 	 * but the type of value returned is the same for each
 	 * powerdomain.
 	 */
-	prm_rmw_mod_reg_bits(OMAP3430_LOGICL1CACHERETSTATE,
-			     (pwrst << __ffs(OMAP3430_LOGICL1CACHERETSTATE)),
-				 pwrdm->prcm_offs, pwrstctrl_reg_offs);
+	v = pwrst << __ffs(OMAP3430_LOGICL1CACHERETSTATE_MASK);
+	prm_rmw_mod_reg_bits(OMAP3430_LOGICL1CACHERETSTATE_MASK, v,
+			     pwrdm->prcm_offs, pwrstctrl_reg_offs);
 
 	return 0;
 }
@@ -676,8 +678,8 @@
 	if (!pwrdm)
 		return -EINVAL;
 
-	return prm_read_mod_bits_shift(pwrdm->prcm_offs,
-				 pwrstst_reg_offs, OMAP3430_LOGICSTATEST);
+	return prm_read_mod_bits_shift(pwrdm->prcm_offs, pwrstst_reg_offs,
+				       OMAP3430_LOGICSTATEST_MASK);
 }
 
 /**
@@ -700,7 +702,7 @@
 	 * powerdomain.
 	 */
 	return prm_read_mod_bits_shift(pwrdm->prcm_offs, OMAP3430_PM_PREPWSTST,
-					OMAP3430_LASTLOGICSTATEENTERED);
+					OMAP3430_LASTLOGICSTATEENTERED_MASK);
 }
 
 /**
@@ -723,7 +725,7 @@
 	 * powerdomain.
 	 */
 	return prm_read_mod_bits_shift(pwrdm->prcm_offs, pwrstctrl_reg_offs,
-					OMAP3430_LOGICSTATEST);
+				       OMAP3430_LOGICSTATEST_MASK);
 }
 
 /**
@@ -978,6 +980,34 @@
 }
 
 /**
+ * pwrdm_set_lowpwrstchange - Request a low power state change
+ * @pwrdm: struct powerdomain *
+ *
+ * Allows a powerdomain to transition to a lower power sleep state
+ * from an existing sleep state without waking up the powerdomain.
+ * Returns -EINVAL if the powerdomain pointer is null or if the
+ * powerdomain does not support LOWPOWERSTATECHANGE, or returns 0
+ * upon success.
+ */
+int pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm)
+{
+	if (!pwrdm)
+		return -EINVAL;
+
+	if (!(pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE))
+		return -EINVAL;
+
+	pr_debug("powerdomain: %s: setting LOWPOWERSTATECHANGE bit\n",
+		 pwrdm->name);
+
+	prm_rmw_mod_reg_bits(OMAP4430_LOWPOWERSTATECHANGE_MASK,
+			     (1 << OMAP4430_LOWPOWERSTATECHANGE_SHIFT),
+			     pwrdm->prcm_offs, pwrstctrl_reg_offs);
+
+	return 0;
+}
+
+/**
  * pwrdm_wait_transition - wait for powerdomain power transition to finish
  * @pwrdm: struct powerdomain * to wait for
  *
@@ -1002,7 +1032,7 @@
 
 	/* XXX Is this udelay() value meaningful? */
 	while ((prm_read_mod_reg(pwrdm->prcm_offs, pwrstst_reg_offs) &
-		OMAP_INTRANSITION) &&
+		OMAP_INTRANSITION_MASK) &&
 	       (c++ < PWRDM_TRANSITION_BAILOUT))
 			udelay(1);
 
diff --git a/arch/arm/mach-omap2/powerdomains44xx.h b/arch/arm/mach-omap2/powerdomains44xx.h
index c101514..c721951 100644
--- a/arch/arm/mach-omap2/powerdomains44xx.h
+++ b/arch/arm/mach-omap2/powerdomains44xx.h
@@ -1,12 +1,12 @@
 /*
  * OMAP4 Power domains framework
  *
- * Copyright (C) 2009 Texas Instruments, Inc.
- * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
  *
  * Abhijit Pagare (abhijitpagare@ti.com)
  * Benoit Cousson (b-cousson@ti.com)
- * Paul Walmsley
+ * Paul Walmsley (paul@pwsan.com)
  *
  * This file is automatically generated from the OMAP hardware databases.
  * We respectfully ask that any modifications to this file be coordinated
@@ -54,6 +54,7 @@
 		[3] = PWRDM_POWER_ON,	/* ducati_l2ram */
 		[4] = PWRDM_POWER_ON,	/* ducati_unicache */
 	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* gfx_44xx_pwrdm: 3D accelerator power domain */
@@ -69,6 +70,7 @@
 	.pwrsts_mem_on	= {
 		[0] = PWRDM_POWER_ON,	/* gfx_mem */
 	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* abe_44xx_pwrdm: Audio back end power domain */
@@ -87,6 +89,7 @@
 		[0] = PWRDM_POWER_ON,	/* aessmem */
 		[1] = PWRDM_POWER_ON,	/* periphmem */
 	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* dss_44xx_pwrdm: Display subsystem power domain */
@@ -103,6 +106,7 @@
 	.pwrsts_mem_on	= {
 		[0] = PWRDM_POWER_ON,	/* dss_mem */
 	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* tesla_44xx_pwrdm: Tesla processor power domain */
@@ -123,6 +127,7 @@
 		[1] = PWRDM_POWER_ON,	/* tesla_l1 */
 		[2] = PWRDM_POWER_ON,	/* tesla_l2 */
 	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* wkup_44xx_pwrdm: Wake-up power domain */
@@ -130,7 +135,7 @@
 	.name		  = "wkup_pwrdm",
 	.prcm_offs	  = OMAP4430_PRM_WKUP_MOD,
 	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRDM_POWER_ON,
+	.pwrsts		  = PWRSTS_ON,
 	.banks		  = 1,
 	.pwrsts_mem_ret	= {
 		[0] = PWRDM_POWER_OFF,	/* wkup_bank */
@@ -143,7 +148,7 @@
 /* cpu0_44xx_pwrdm: MPU0 processor and Neon coprocessor power domain */
 static struct powerdomain cpu0_44xx_pwrdm = {
 	.name		  = "cpu0_pwrdm",
-	.prcm_offs	  = OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD,
+	.prcm_offs	  = OMAP4430_PRCM_MPU_CPU0_MOD,
 	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 	.pwrsts		  = PWRSTS_OFF_RET_ON,
 	.pwrsts_logic_ret = PWRSTS_OFF_RET,
@@ -159,7 +164,7 @@
 /* cpu1_44xx_pwrdm: MPU1 processor and Neon coprocessor power domain */
 static struct powerdomain cpu1_44xx_pwrdm = {
 	.name		  = "cpu1_pwrdm",
-	.prcm_offs	  = OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD,
+	.prcm_offs	  = OMAP4430_PRCM_MPU_CPU1_MOD,
 	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 	.pwrsts		  = PWRSTS_OFF_RET_ON,
 	.pwrsts_logic_ret = PWRSTS_OFF_RET,
@@ -227,6 +232,7 @@
 		[2] = PWRDM_POWER_ON,	/* tcm1_mem */
 		[3] = PWRDM_POWER_ON,	/* tcm2_mem */
 	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* cam_44xx_pwrdm: Camera subsystem power domain */
@@ -242,6 +248,7 @@
 	.pwrsts_mem_on	= {
 		[0] = PWRDM_POWER_ON,	/* cam_mem */
 	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* l3init_44xx_pwrdm: L3 initators pheripherals power domain  */
@@ -258,6 +265,7 @@
 	.pwrsts_mem_on	= {
 		[0] = PWRDM_POWER_ON,	/* l3init_bank1 */
 	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* l4per_44xx_pwrdm: Target peripherals power domain */
@@ -276,6 +284,7 @@
 		[0] = PWRDM_POWER_ON,	/* nonretained_bank */
 		[1] = PWRDM_POWER_ON,	/* retained_bank */
 	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /*
@@ -286,7 +295,7 @@
 	.name		  = "always_on_core_pwrdm",
 	.prcm_offs	  = OMAP4430_PRM_ALWAYS_ON_MOD,
 	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRDM_POWER_ON,
+	.pwrsts		  = PWRSTS_ON,
 };
 
 /* cefuse_44xx_pwrdm: Customer efuse controller power domain */
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index 90f603d..995b7ed 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -112,83 +112,75 @@
 
 #define OMAP4430_SCRM_SCRM_MOD	0x0000
 
-/* CHIRONSS instances */
+/* PRCM_MPU instances */
 
-#define OMAP4430_CHIRONSS_CHIRONSS_OCP_SOCKET_PRCM_MOD	0x0000
-#define OMAP4430_CHIRONSS_CHIRONSS_DEVICE_PRM_MOD	0x0200
-#define OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD		0x0400
-#define OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD		0x0800
-
-/* Base Addresses for the OMAP4 */
-
-#define OMAP4430_CM1_BASE		0x4a004000
-#define OMAP4430_CM2_BASE		0x4a008000
-#define OMAP4430_PRM_BASE		0x4a306000
-#define OMAP4430_SCRM_BASE		0x4a30a000
-#define OMAP4430_CHIRONSS_BASE		0x48243000
+#define OMAP4430_PRCM_MPU_OCP_SOCKET_PRCM_MOD	0x0000
+#define OMAP4430_PRCM_MPU_DEVICE_PRM_MOD	0x0200
+#define OMAP4430_PRCM_MPU_CPU0_MOD		0x0400
+#define OMAP4430_PRCM_MPU_CPU1_MOD		0x0800
 
 
 /* 24XX register bits shared between CM & PRM registers */
 
 /* CM_FCLKEN1_CORE, CM_ICLKEN1_CORE, PM_WKEN1_CORE shared bits */
 #define OMAP2420_EN_MMC_SHIFT				26
-#define OMAP2420_EN_MMC					(1 << 26)
+#define OMAP2420_EN_MMC_MASK				(1 << 26)
 #define OMAP24XX_EN_UART2_SHIFT				22
-#define OMAP24XX_EN_UART2				(1 << 22)
+#define OMAP24XX_EN_UART2_MASK				(1 << 22)
 #define OMAP24XX_EN_UART1_SHIFT				21
-#define OMAP24XX_EN_UART1				(1 << 21)
+#define OMAP24XX_EN_UART1_MASK				(1 << 21)
 #define OMAP24XX_EN_MCSPI2_SHIFT			18
-#define OMAP24XX_EN_MCSPI2				(1 << 18)
+#define OMAP24XX_EN_MCSPI2_MASK				(1 << 18)
 #define OMAP24XX_EN_MCSPI1_SHIFT			17
-#define OMAP24XX_EN_MCSPI1				(1 << 17)
+#define OMAP24XX_EN_MCSPI1_MASK				(1 << 17)
 #define OMAP24XX_EN_MCBSP2_SHIFT			16
-#define OMAP24XX_EN_MCBSP2				(1 << 16)
+#define OMAP24XX_EN_MCBSP2_MASK				(1 << 16)
 #define OMAP24XX_EN_MCBSP1_SHIFT			15
-#define OMAP24XX_EN_MCBSP1				(1 << 15)
+#define OMAP24XX_EN_MCBSP1_MASK				(1 << 15)
 #define OMAP24XX_EN_GPT12_SHIFT				14
-#define OMAP24XX_EN_GPT12				(1 << 14)
+#define OMAP24XX_EN_GPT12_MASK				(1 << 14)
 #define OMAP24XX_EN_GPT11_SHIFT				13
-#define OMAP24XX_EN_GPT11				(1 << 13)
+#define OMAP24XX_EN_GPT11_MASK				(1 << 13)
 #define OMAP24XX_EN_GPT10_SHIFT				12
-#define OMAP24XX_EN_GPT10				(1 << 12)
+#define OMAP24XX_EN_GPT10_MASK				(1 << 12)
 #define OMAP24XX_EN_GPT9_SHIFT				11
-#define OMAP24XX_EN_GPT9				(1 << 11)
+#define OMAP24XX_EN_GPT9_MASK				(1 << 11)
 #define OMAP24XX_EN_GPT8_SHIFT				10
-#define OMAP24XX_EN_GPT8				(1 << 10)
+#define OMAP24XX_EN_GPT8_MASK				(1 << 10)
 #define OMAP24XX_EN_GPT7_SHIFT				9
-#define OMAP24XX_EN_GPT7				(1 << 9)
+#define OMAP24XX_EN_GPT7_MASK				(1 << 9)
 #define OMAP24XX_EN_GPT6_SHIFT				8
-#define OMAP24XX_EN_GPT6				(1 << 8)
+#define OMAP24XX_EN_GPT6_MASK				(1 << 8)
 #define OMAP24XX_EN_GPT5_SHIFT				7
-#define OMAP24XX_EN_GPT5				(1 << 7)
+#define OMAP24XX_EN_GPT5_MASK				(1 << 7)
 #define OMAP24XX_EN_GPT4_SHIFT				6
-#define OMAP24XX_EN_GPT4				(1 << 6)
+#define OMAP24XX_EN_GPT4_MASK				(1 << 6)
 #define OMAP24XX_EN_GPT3_SHIFT				5
-#define OMAP24XX_EN_GPT3				(1 << 5)
+#define OMAP24XX_EN_GPT3_MASK				(1 << 5)
 #define OMAP24XX_EN_GPT2_SHIFT				4
-#define OMAP24XX_EN_GPT2				(1 << 4)
+#define OMAP24XX_EN_GPT2_MASK				(1 << 4)
 #define OMAP2420_EN_VLYNQ_SHIFT				3
-#define OMAP2420_EN_VLYNQ				(1 << 3)
+#define OMAP2420_EN_VLYNQ_MASK				(1 << 3)
 
 /* CM_FCLKEN2_CORE, CM_ICLKEN2_CORE, PM_WKEN2_CORE shared bits */
 #define OMAP2430_EN_GPIO5_SHIFT				10
-#define OMAP2430_EN_GPIO5				(1 << 10)
+#define OMAP2430_EN_GPIO5_MASK				(1 << 10)
 #define OMAP2430_EN_MCSPI3_SHIFT			9
-#define OMAP2430_EN_MCSPI3				(1 << 9)
+#define OMAP2430_EN_MCSPI3_MASK				(1 << 9)
 #define OMAP2430_EN_MMCHS2_SHIFT			8
-#define OMAP2430_EN_MMCHS2				(1 << 8)
+#define OMAP2430_EN_MMCHS2_MASK				(1 << 8)
 #define OMAP2430_EN_MMCHS1_SHIFT			7
-#define OMAP2430_EN_MMCHS1				(1 << 7)
+#define OMAP2430_EN_MMCHS1_MASK				(1 << 7)
 #define OMAP24XX_EN_UART3_SHIFT				2
-#define OMAP24XX_EN_UART3				(1 << 2)
+#define OMAP24XX_EN_UART3_MASK				(1 << 2)
 #define OMAP24XX_EN_USB_SHIFT				0
-#define OMAP24XX_EN_USB					(1 << 0)
+#define OMAP24XX_EN_USB_MASK				(1 << 0)
 
 /* CM_ICLKEN2_CORE, PM_WKEN2_CORE shared bits */
 #define OMAP2430_EN_MDM_INTC_SHIFT			11
-#define OMAP2430_EN_MDM_INTC				(1 << 11)
+#define OMAP2430_EN_MDM_INTC_MASK			(1 << 11)
 #define OMAP2430_EN_USBHS_SHIFT				6
-#define OMAP2430_EN_USBHS				(1 << 6)
+#define OMAP2430_EN_USBHS_MASK				(1 << 6)
 
 /* CM_IDLEST1_CORE, PM_WKST1_CORE shared bits */
 #define OMAP2420_ST_MMC_SHIFT				26
@@ -246,9 +238,9 @@
 
 /* CM_FCLKEN_WKUP, CM_ICLKEN_WKUP, PM_WKEN_WKUP shared bits */
 #define OMAP24XX_EN_GPIOS_SHIFT				2
-#define OMAP24XX_EN_GPIOS				(1 << 2)
+#define OMAP24XX_EN_GPIOS_MASK				(1 << 2)
 #define OMAP24XX_EN_GPT1_SHIFT				0
-#define OMAP24XX_EN_GPT1				(1 << 0)
+#define OMAP24XX_EN_GPT1_MASK				(1 << 0)
 
 /* PM_WKST_WKUP, CM_IDLEST_WKUP shared bits */
 #define OMAP24XX_ST_GPIOS_SHIFT				(1 << 2)
@@ -267,47 +259,47 @@
 #define OMAP3430_REV_MASK				(0xff << 0)
 
 /* CM_SYSCONFIG, PRM_SYSCONFIG shared bits */
-#define OMAP3430_AUTOIDLE				(1 << 0)
+#define OMAP3430_AUTOIDLE_MASK				(1 << 0)
 
 /* CM_FCLKEN1_CORE, CM_ICLKEN1_CORE, PM_WKEN1_CORE shared bits */
-#define OMAP3430_EN_MMC2				(1 << 25)
+#define OMAP3430_EN_MMC2_MASK				(1 << 25)
 #define OMAP3430_EN_MMC2_SHIFT				25
-#define OMAP3430_EN_MMC1				(1 << 24)
+#define OMAP3430_EN_MMC1_MASK				(1 << 24)
 #define OMAP3430_EN_MMC1_SHIFT				24
-#define OMAP3430_EN_MCSPI4				(1 << 21)
+#define OMAP3430_EN_MCSPI4_MASK				(1 << 21)
 #define OMAP3430_EN_MCSPI4_SHIFT			21
-#define OMAP3430_EN_MCSPI3				(1 << 20)
+#define OMAP3430_EN_MCSPI3_MASK				(1 << 20)
 #define OMAP3430_EN_MCSPI3_SHIFT			20
-#define OMAP3430_EN_MCSPI2				(1 << 19)
+#define OMAP3430_EN_MCSPI2_MASK				(1 << 19)
 #define OMAP3430_EN_MCSPI2_SHIFT			19
-#define OMAP3430_EN_MCSPI1				(1 << 18)
+#define OMAP3430_EN_MCSPI1_MASK				(1 << 18)
 #define OMAP3430_EN_MCSPI1_SHIFT			18
-#define OMAP3430_EN_I2C3				(1 << 17)
+#define OMAP3430_EN_I2C3_MASK				(1 << 17)
 #define OMAP3430_EN_I2C3_SHIFT				17
-#define OMAP3430_EN_I2C2				(1 << 16)
+#define OMAP3430_EN_I2C2_MASK				(1 << 16)
 #define OMAP3430_EN_I2C2_SHIFT				16
-#define OMAP3430_EN_I2C1				(1 << 15)
+#define OMAP3430_EN_I2C1_MASK				(1 << 15)
 #define OMAP3430_EN_I2C1_SHIFT				15
-#define OMAP3430_EN_UART2				(1 << 14)
+#define OMAP3430_EN_UART2_MASK				(1 << 14)
 #define OMAP3430_EN_UART2_SHIFT				14
-#define OMAP3430_EN_UART1				(1 << 13)
+#define OMAP3430_EN_UART1_MASK				(1 << 13)
 #define OMAP3430_EN_UART1_SHIFT				13
-#define OMAP3430_EN_GPT11				(1 << 12)
+#define OMAP3430_EN_GPT11_MASK				(1 << 12)
 #define OMAP3430_EN_GPT11_SHIFT				12
-#define OMAP3430_EN_GPT10				(1 << 11)
+#define OMAP3430_EN_GPT10_MASK				(1 << 11)
 #define OMAP3430_EN_GPT10_SHIFT				11
-#define OMAP3430_EN_MCBSP5				(1 << 10)
+#define OMAP3430_EN_MCBSP5_MASK				(1 << 10)
 #define OMAP3430_EN_MCBSP5_SHIFT			10
-#define OMAP3430_EN_MCBSP1				(1 << 9)
+#define OMAP3430_EN_MCBSP1_MASK				(1 << 9)
 #define OMAP3430_EN_MCBSP1_SHIFT			9
-#define OMAP3430_EN_FSHOSTUSB				(1 << 5)
+#define OMAP3430_EN_FSHOSTUSB_MASK			(1 << 5)
 #define OMAP3430_EN_FSHOSTUSB_SHIFT			5
-#define OMAP3430_EN_D2D					(1 << 3)
+#define OMAP3430_EN_D2D_MASK				(1 << 3)
 #define OMAP3430_EN_D2D_SHIFT				3
 
 /* CM_ICLKEN1_CORE, PM_WKEN1_CORE shared bits */
-#define OMAP3430_EN_HSOTGUSB				(1 << 4)
-#define OMAP3430_EN_HSOTGUSB_SHIFT				4
+#define OMAP3430_EN_HSOTGUSB_MASK			(1 << 4)
+#define OMAP3430_EN_HSOTGUSB_SHIFT			4
 
 /* PM_WKST1_CORE, CM_IDLEST1_CORE shared bits */
 #define OMAP3430_ST_MMC2_SHIFT				25
@@ -352,21 +344,21 @@
 #define OMAP3430_ST_D2D_MASK				(1 << 3)
 
 /* CM_FCLKEN_WKUP, CM_ICLKEN_WKUP, PM_WKEN_WKUP shared bits */
-#define OMAP3430_EN_GPIO1				(1 << 3)
+#define OMAP3430_EN_GPIO1_MASK				(1 << 3)
 #define OMAP3430_EN_GPIO1_SHIFT				3
-#define OMAP3430_EN_GPT12				(1 << 1)
+#define OMAP3430_EN_GPT12_MASK				(1 << 1)
 #define OMAP3430_EN_GPT12_SHIFT				1
-#define OMAP3430_EN_GPT1				(1 << 0)
+#define OMAP3430_EN_GPT1_MASK				(1 << 0)
 #define OMAP3430_EN_GPT1_SHIFT				0
 
 /* CM_FCLKEN_WKUP, PM_WKEN_WKUP shared bits */
-#define OMAP3430_EN_SR2					(1 << 7)
+#define OMAP3430_EN_SR2_MASK				(1 << 7)
 #define OMAP3430_EN_SR2_SHIFT				7
-#define OMAP3430_EN_SR1					(1 << 6)
+#define OMAP3430_EN_SR1_MASK				(1 << 6)
 #define OMAP3430_EN_SR1_SHIFT				6
 
 /* CM_ICLKEN_WKUP, PM_WKEN_WKUP shared bits */
-#define OMAP3430_EN_GPT12				(1 << 1)
+#define OMAP3430_EN_GPT12_MASK				(1 << 1)
 #define OMAP3430_EN_GPT12_SHIFT				1
 
 /* CM_IDLEST_WKUP, PM_WKST_WKUP shared bits */
@@ -386,47 +378,47 @@
  * CM_SLEEPDEP_PER, PM_WKDEP_IVA2, PM_WKDEP_GFX,
  * PM_WKDEP_DSS, PM_WKDEP_CAM, PM_WKDEP_PER, PM_WKDEP_NEON shared bits
  */
-#define OMAP3430_EN_MPU					(1 << 1)
+#define OMAP3430_EN_MPU_MASK				(1 << 1)
 #define OMAP3430_EN_MPU_SHIFT				1
 
 /* CM_FCLKEN_PER, CM_ICLKEN_PER, PM_WKEN_PER shared bits */
-#define OMAP3430_EN_GPIO6				(1 << 17)
+#define OMAP3430_EN_GPIO6_MASK				(1 << 17)
 #define OMAP3430_EN_GPIO6_SHIFT				17
-#define OMAP3430_EN_GPIO5				(1 << 16)
+#define OMAP3430_EN_GPIO5_MASK				(1 << 16)
 #define OMAP3430_EN_GPIO5_SHIFT				16
-#define OMAP3430_EN_GPIO4				(1 << 15)
+#define OMAP3430_EN_GPIO4_MASK				(1 << 15)
 #define OMAP3430_EN_GPIO4_SHIFT				15
-#define OMAP3430_EN_GPIO3				(1 << 14)
+#define OMAP3430_EN_GPIO3_MASK				(1 << 14)
 #define OMAP3430_EN_GPIO3_SHIFT				14
-#define OMAP3430_EN_GPIO2				(1 << 13)
+#define OMAP3430_EN_GPIO2_MASK				(1 << 13)
 #define OMAP3430_EN_GPIO2_SHIFT				13
-#define OMAP3430_EN_UART3				(1 << 11)
+#define OMAP3430_EN_UART3_MASK				(1 << 11)
 #define OMAP3430_EN_UART3_SHIFT				11
-#define OMAP3430_EN_GPT9				(1 << 10)
+#define OMAP3430_EN_GPT9_MASK				(1 << 10)
 #define OMAP3430_EN_GPT9_SHIFT				10
-#define OMAP3430_EN_GPT8				(1 << 9)
+#define OMAP3430_EN_GPT8_MASK				(1 << 9)
 #define OMAP3430_EN_GPT8_SHIFT				9
-#define OMAP3430_EN_GPT7				(1 << 8)
+#define OMAP3430_EN_GPT7_MASK				(1 << 8)
 #define OMAP3430_EN_GPT7_SHIFT				8
-#define OMAP3430_EN_GPT6				(1 << 7)
+#define OMAP3430_EN_GPT6_MASK				(1 << 7)
 #define OMAP3430_EN_GPT6_SHIFT				7
-#define OMAP3430_EN_GPT5				(1 << 6)
+#define OMAP3430_EN_GPT5_MASK				(1 << 6)
 #define OMAP3430_EN_GPT5_SHIFT				6
-#define OMAP3430_EN_GPT4				(1 << 5)
+#define OMAP3430_EN_GPT4_MASK				(1 << 5)
 #define OMAP3430_EN_GPT4_SHIFT				5
-#define OMAP3430_EN_GPT3				(1 << 4)
+#define OMAP3430_EN_GPT3_MASK				(1 << 4)
 #define OMAP3430_EN_GPT3_SHIFT				4
-#define OMAP3430_EN_GPT2				(1 << 3)
+#define OMAP3430_EN_GPT2_MASK				(1 << 3)
 #define OMAP3430_EN_GPT2_SHIFT				3
 
 /* CM_FCLKEN_PER, CM_ICLKEN_PER, PM_WKEN_PER, PM_WKST_PER shared bits */
 /* XXX Possible TI documentation bug: should the PM_WKST_PER EN_* bits
  * be ST_* bits instead? */
-#define OMAP3430_EN_MCBSP4				(1 << 2)
+#define OMAP3430_EN_MCBSP4_MASK				(1 << 2)
 #define OMAP3430_EN_MCBSP4_SHIFT			2
-#define OMAP3430_EN_MCBSP3				(1 << 1)
+#define OMAP3430_EN_MCBSP3_MASK				(1 << 1)
 #define OMAP3430_EN_MCBSP3_SHIFT			1
-#define OMAP3430_EN_MCBSP2				(1 << 0)
+#define OMAP3430_EN_MCBSP2_MASK				(1 << 0)
 #define OMAP3430_EN_MCBSP2_SHIFT			0
 
 /* CM_IDLEST_PER, PM_WKST_PER shared bits */
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
index 07a60f1..c201374 100644
--- a/arch/arm/mach-omap2/prcm.c
+++ b/arch/arm/mach-omap2/prcm.c
@@ -158,10 +158,10 @@
 		WARN_ON(1);
 
 	if (cpu_is_omap24xx() || cpu_is_omap34xx())
-		prm_set_mod_reg_bits(OMAP_RST_DPLL3, prcm_offs,
+		prm_set_mod_reg_bits(OMAP_RST_DPLL3_MASK, prcm_offs,
 						 OMAP2_RM_RSTCTRL);
 	if (cpu_is_omap44xx())
-		prm_set_mod_reg_bits(OMAP_RST_DPLL3, prcm_offs,
+		prm_set_mod_reg_bits(OMAP_RST_DPLL3_MASK, prcm_offs,
 						 OMAP4_RM_RSTCTRL);
 }
 
diff --git a/arch/arm/mach-omap2/prm-regbits-24xx.h b/arch/arm/mach-omap2/prm-regbits-24xx.h
index 4002051..0b188ff 100644
--- a/arch/arm/mach-omap2/prm-regbits-24xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-24xx.h
@@ -19,14 +19,14 @@
 /* Bits shared between registers */
 
 /* PRCM_IRQSTATUS_MPU, PM_IRQSTATUS_DSP, PRCM_IRQSTATUS_IVA shared bits */
-#define OMAP24XX_VOLTTRANS_ST				(1 << 2)
-#define OMAP24XX_WKUP2_ST				(1 << 1)
-#define OMAP24XX_WKUP1_ST				(1 << 0)
+#define OMAP24XX_VOLTTRANS_ST_MASK			(1 << 2)
+#define OMAP24XX_WKUP2_ST_MASK				(1 << 1)
+#define OMAP24XX_WKUP1_ST_MASK				(1 << 0)
 
 /* PRCM_IRQENABLE_MPU, PM_IRQENABLE_DSP, PRCM_IRQENABLE_IVA shared bits */
-#define OMAP24XX_VOLTTRANS_EN				(1 << 2)
-#define OMAP24XX_WKUP2_EN				(1 << 1)
-#define OMAP24XX_WKUP1_EN				(1 << 0)
+#define OMAP24XX_VOLTTRANS_EN_MASK			(1 << 2)
+#define OMAP24XX_WKUP2_EN_MASK				(1 << 1)
+#define OMAP24XX_WKUP1_EN_MASK				(1 << 0)
 
 /* PM_WKDEP_GFX, PM_WKDEP_MPU, PM_WKDEP_DSP, PM_WKDEP_MDM shared bits */
 #define OMAP24XX_EN_MPU_SHIFT				1
@@ -40,16 +40,16 @@
  */
 #define OMAP24XX_MEMONSTATE_SHIFT			10
 #define OMAP24XX_MEMONSTATE_MASK			(0x3 << 10)
-#define OMAP24XX_MEMRETSTATE				(1 << 3)
+#define OMAP24XX_MEMRETSTATE_MASK			(1 << 3)
 
 /* PM_PWSTCTRL_GFX, PM_PWSTCTRL_DSP, PM_PWSTCTRL_MDM shared bits */
-#define OMAP24XX_FORCESTATE				(1 << 18)
+#define OMAP24XX_FORCESTATE_MASK			(1 << 18)
 
 /*
  * PM_PWSTST_CORE, PM_PWSTST_GFX, PM_PWSTST_MPU, PM_PWSTST_DSP,
  * PM_PWSTST_MDM shared bits
  */
-#define OMAP24XX_CLKACTIVITY				(1 << 19)
+#define OMAP24XX_CLKACTIVITY_MASK			(1 << 19)
 
 /* PM_PWSTST_MPU, PM_PWSTST_CORE, PM_PWSTST_DSP shared bits */
 #define OMAP24XX_LASTSTATEENTERED_SHIFT			4
@@ -71,26 +71,26 @@
 #define OMAP24XX_REV_MASK				(0xff << 0)
 
 /* PRCM_SYSCONFIG */
-#define OMAP24XX_AUTOIDLE				(1 << 0)
+#define OMAP24XX_AUTOIDLE_MASK				(1 << 0)
 
 /* PRCM_IRQSTATUS_MPU specific bits */
-#define OMAP2430_DPLL_RECAL_ST				(1 << 6)
-#define OMAP24XX_TRANSITION_ST				(1 << 5)
-#define OMAP24XX_EVGENOFF_ST				(1 << 4)
-#define OMAP24XX_EVGENON_ST				(1 << 3)
+#define OMAP2430_DPLL_RECAL_ST_MASK			(1 << 6)
+#define OMAP24XX_TRANSITION_ST_MASK			(1 << 5)
+#define OMAP24XX_EVGENOFF_ST_MASK			(1 << 4)
+#define OMAP24XX_EVGENON_ST_MASK			(1 << 3)
 
 /* PRCM_IRQENABLE_MPU specific bits */
-#define OMAP2430_DPLL_RECAL_EN				(1 << 6)
-#define OMAP24XX_TRANSITION_EN				(1 << 5)
-#define OMAP24XX_EVGENOFF_EN				(1 << 4)
-#define OMAP24XX_EVGENON_EN				(1 << 3)
+#define OMAP2430_DPLL_RECAL_EN_MASK			(1 << 6)
+#define OMAP24XX_TRANSITION_EN_MASK			(1 << 5)
+#define OMAP24XX_EVGENOFF_EN_MASK			(1 << 4)
+#define OMAP24XX_EVGENON_EN_MASK			(1 << 3)
 
 /* PRCM_VOLTCTRL */
-#define OMAP24XX_AUTO_EXTVOLT				(1 << 15)
-#define OMAP24XX_FORCE_EXTVOLT				(1 << 14)
+#define OMAP24XX_AUTO_EXTVOLT_MASK			(1 << 15)
+#define OMAP24XX_FORCE_EXTVOLT_MASK			(1 << 14)
 #define OMAP24XX_SETOFF_LEVEL_SHIFT			12
 #define OMAP24XX_SETOFF_LEVEL_MASK			(0x3 << 12)
-#define OMAP24XX_MEMRETCTRL				(1 << 8)
+#define OMAP24XX_MEMRETCTRL_MASK			(1 << 8)
 #define OMAP24XX_SETRET_LEVEL_SHIFT			6
 #define OMAP24XX_SETRET_LEVEL_MASK			(0x3 << 6)
 #define OMAP24XX_VOLT_LEVEL_SHIFT			0
@@ -104,13 +104,13 @@
 
 /* PRCM_CLKOUT_CTRL */
 #define OMAP2420_CLKOUT2_EN_SHIFT			15
-#define OMAP2420_CLKOUT2_EN				(1 << 15)
+#define OMAP2420_CLKOUT2_EN_MASK			(1 << 15)
 #define OMAP2420_CLKOUT2_DIV_SHIFT			11
 #define OMAP2420_CLKOUT2_DIV_MASK			(0x7 << 11)
 #define OMAP2420_CLKOUT2_SOURCE_SHIFT			8
 #define OMAP2420_CLKOUT2_SOURCE_MASK			(0x3 << 8)
 #define OMAP24XX_CLKOUT_EN_SHIFT			7
-#define OMAP24XX_CLKOUT_EN				(1 << 7)
+#define OMAP24XX_CLKOUT_EN_MASK				(1 << 7)
 #define OMAP24XX_CLKOUT_DIV_SHIFT			3
 #define OMAP24XX_CLKOUT_DIV_MASK			(0x7 << 3)
 #define OMAP24XX_CLKOUT_SOURCE_SHIFT			0
@@ -118,25 +118,25 @@
 
 /* PRCM_CLKEMUL_CTRL */
 #define OMAP24XX_EMULATION_EN_SHIFT			0
-#define OMAP24XX_EMULATION_EN				(1 << 0)
+#define OMAP24XX_EMULATION_EN_MASK			(1 << 0)
 
 /* PRCM_CLKCFG_CTRL */
-#define OMAP24XX_VALID_CONFIG				(1 << 0)
+#define OMAP24XX_VALID_CONFIG_MASK			(1 << 0)
 
 /* PRCM_CLKCFG_STATUS */
-#define OMAP24XX_CONFIG_STATUS				(1 << 0)
+#define OMAP24XX_CONFIG_STATUS_MASK			(1 << 0)
 
 /* PRCM_VOLTSETUP specific bits */
 
 /* PRCM_CLKSSETUP specific bits */
 
 /* PRCM_POLCTRL */
-#define OMAP2420_CLKOUT2_POL				(1 << 10)
-#define OMAP24XX_CLKOUT_POL				(1 << 9)
-#define OMAP24XX_CLKREQ_POL				(1 << 8)
-#define OMAP2430_USE_POWEROK				(1 << 2)
-#define OMAP2430_POWEROK_POL				(1 << 1)
-#define OMAP24XX_EXTVOL_POL				(1 << 0)
+#define OMAP2420_CLKOUT2_POL_MASK			(1 << 10)
+#define OMAP24XX_CLKOUT_POL_MASK			(1 << 9)
+#define OMAP24XX_CLKREQ_POL_MASK			(1 << 8)
+#define OMAP2430_USE_POWEROK_MASK			(1 << 2)
+#define OMAP2430_POWEROK_POL_MASK			(1 << 1)
+#define OMAP24XX_EXTVOL_POL_MASK			(1 << 0)
 
 /* RM_RSTST_MPU specific bits */
 /* 2430 calls GLOBALWMPU_RST "GLOBALWARM_RST" instead */
@@ -154,7 +154,7 @@
 /* PM_EVEGENOFFTIM_MPU specific bits */
 
 /* PM_PWSTCTRL_MPU specific bits */
-#define OMAP2430_FORCESTATE				(1 << 18)
+#define OMAP2430_FORCESTATE_MASK			(1 << 18)
 
 /* PM_PWSTST_MPU specific bits */
 /* INTRANSITION, CLKACTIVITY, POWERSTATE, MEMSTATEST are 2430 only */
@@ -168,21 +168,21 @@
 /* PM_WKST2_CORE specific bits */
 
 /* PM_WKDEP_CORE specific bits*/
-#define OMAP2430_PM_WKDEP_CORE_EN_MDM			(1 << 5)
-#define OMAP24XX_PM_WKDEP_CORE_EN_GFX			(1 << 3)
-#define OMAP24XX_PM_WKDEP_CORE_EN_DSP			(1 << 2)
+#define OMAP2430_PM_WKDEP_CORE_EN_MDM_MASK		(1 << 5)
+#define OMAP24XX_PM_WKDEP_CORE_EN_GFX_MASK		(1 << 3)
+#define OMAP24XX_PM_WKDEP_CORE_EN_DSP_MASK		(1 << 2)
 
 /* PM_PWSTCTRL_CORE specific bits */
-#define OMAP24XX_MEMORYCHANGE				(1 << 20)
+#define OMAP24XX_MEMORYCHANGE_MASK			(1 << 20)
 #define OMAP24XX_MEM3ONSTATE_SHIFT			14
 #define OMAP24XX_MEM3ONSTATE_MASK			(0x3 << 14)
 #define OMAP24XX_MEM2ONSTATE_SHIFT			12
 #define OMAP24XX_MEM2ONSTATE_MASK			(0x3 << 12)
 #define OMAP24XX_MEM1ONSTATE_SHIFT			10
 #define OMAP24XX_MEM1ONSTATE_MASK			(0x3 << 10)
-#define OMAP24XX_MEM3RETSTATE				(1 << 5)
-#define OMAP24XX_MEM2RETSTATE				(1 << 4)
-#define OMAP24XX_MEM1RETSTATE				(1 << 3)
+#define OMAP24XX_MEM3RETSTATE_MASK			(1 << 5)
+#define OMAP24XX_MEM2RETSTATE_MASK			(1 << 4)
+#define OMAP24XX_MEM1RETSTATE_MASK			(1 << 3)
 
 /* PM_PWSTST_CORE specific bits */
 #define OMAP24XX_MEM3STATEST_SHIFT			14
@@ -193,10 +193,10 @@
 #define OMAP24XX_MEM1STATEST_MASK			(0x3 << 10)
 
 /* RM_RSTCTRL_GFX */
-#define OMAP24XX_GFX_RST				(1 << 0)
+#define OMAP24XX_GFX_RST_MASK				(1 << 0)
 
 /* RM_RSTST_GFX specific bits */
-#define OMAP24XX_GFX_SW_RST				(1 << 4)
+#define OMAP24XX_GFX_SW_RST_MASK			(1 << 4)
 
 /* PM_PWSTCTRL_GFX specific bits */
 
@@ -209,25 +209,25 @@
 
 /* RM_RSTST_WKUP specific bits */
 /* 2430 calls EXTWMPU_RST "EXTWARM_RST" and GLOBALWMPU_RST "GLOBALWARM_RST" */
-#define OMAP24XX_EXTWMPU_RST				(1 << 6)
-#define OMAP24XX_SECU_WD_RST				(1 << 5)
-#define OMAP24XX_MPU_WD_RST				(1 << 4)
-#define OMAP24XX_SECU_VIOL_RST				(1 << 3)
+#define OMAP24XX_EXTWMPU_RST_MASK			(1 << 6)
+#define OMAP24XX_SECU_WD_RST_MASK			(1 << 5)
+#define OMAP24XX_MPU_WD_RST_MASK			(1 << 4)
+#define OMAP24XX_SECU_VIOL_RST_MASK			(1 << 3)
 
 /* PM_WKEN_WKUP specific bits */
 
 /* PM_WKST_WKUP specific bits */
 
 /* RM_RSTCTRL_DSP */
-#define OMAP2420_RST_IVA				(1 << 8)
-#define OMAP24XX_RST2_DSP				(1 << 1)
-#define OMAP24XX_RST1_DSP				(1 << 0)
+#define OMAP2420_RST_IVA_MASK				(1 << 8)
+#define OMAP24XX_RST2_DSP_MASK				(1 << 1)
+#define OMAP24XX_RST1_DSP_MASK				(1 << 0)
 
 /* RM_RSTST_DSP specific bits */
 /* 2430 calls GLOBALWMPU_RST "GLOBALWARM_RST" */
-#define OMAP2420_IVA_SW_RST				(1 << 8)
-#define OMAP24XX_DSP_SW_RST2				(1 << 5)
-#define OMAP24XX_DSP_SW_RST1				(1 << 4)
+#define OMAP2420_IVA_SW_RST_MASK			(1 << 8)
+#define OMAP24XX_DSP_SW_RST2_MASK			(1 << 5)
+#define OMAP24XX_DSP_SW_RST1_MASK			(1 << 4)
 
 /* PM_WKDEP_DSP specific bits */
 
@@ -235,7 +235,7 @@
 /* 2430 only: MEMONSTATE, MEMRETSTATE */
 #define OMAP2420_MEMIONSTATE_SHIFT			12
 #define OMAP2420_MEMIONSTATE_MASK			(0x3 << 12)
-#define OMAP2420_MEMIRETSTATE				(1 << 4)
+#define OMAP2420_MEMIRETSTATE_MASK			(1 << 4)
 
 /* PM_PWSTST_DSP specific bits */
 /* MEMSTATEST is 2430 only */
@@ -248,18 +248,18 @@
 
 /* RM_RSTCTRL_MDM */
 /* 2430 only */
-#define OMAP2430_PWRON1_MDM				(1 << 1)
-#define OMAP2430_RST1_MDM				(1 << 0)
+#define OMAP2430_PWRON1_MDM_MASK			(1 << 1)
+#define OMAP2430_RST1_MDM_MASK				(1 << 0)
 
 /* RM_RSTST_MDM specific bits */
 /* 2430 only */
-#define OMAP2430_MDM_SECU_VIOL				(1 << 6)
-#define OMAP2430_MDM_SW_PWRON1				(1 << 5)
-#define OMAP2430_MDM_SW_RST1				(1 << 4)
+#define OMAP2430_MDM_SECU_VIOL_MASK			(1 << 6)
+#define OMAP2430_MDM_SW_PWRON1_MASK			(1 << 5)
+#define OMAP2430_MDM_SW_RST1_MASK			(1 << 4)
 
 /* PM_WKEN_MDM */
 /* 2430 only */
-#define OMAP2430_PM_WKEN_MDM_EN_MDM			(1 << 0)
+#define OMAP2430_PM_WKEN_MDM_EN_MDM_MASK		(1 << 0)
 
 /* PM_WKST_MDM specific bits */
 /* 2430 only */
@@ -269,7 +269,7 @@
 
 /* PM_PWSTCTRL_MDM specific bits */
 /* 2430 only */
-#define OMAP2430_KILLDOMAINWKUP				(1 << 19)
+#define OMAP2430_KILLDOMAINWKUP_MASK			(1 << 19)
 
 /* PM_PWSTST_MDM specific bits */
 /* 2430 only */
diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
index 8f21bae6..7fd6023 100644
--- a/arch/arm/mach-omap2/prm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
@@ -35,10 +35,10 @@
 #define OMAP3430_ERRORGAIN_MASK				(0xff << 16)
 #define OMAP3430_INITVOLTAGE_SHIFT			8
 #define OMAP3430_INITVOLTAGE_MASK			(0xff << 8)
-#define OMAP3430_TIMEOUTEN				(1 << 3)
-#define OMAP3430_INITVDD				(1 << 2)
-#define OMAP3430_FORCEUPDATE				(1 << 1)
-#define OMAP3430_VPENABLE				(1 << 0)
+#define OMAP3430_TIMEOUTEN_MASK				(1 << 3)
+#define OMAP3430_INITVDD_MASK				(1 << 2)
+#define OMAP3430_FORCEUPDATE_MASK			(1 << 1)
+#define OMAP3430_VPENABLE_MASK				(1 << 0)
 
 /* PRM_VP1_VSTEPMIN, PRM_VP2_VSTEPMIN shared bits */
 #define OMAP3430_SMPSWAITTIMEMIN_SHIFT			8
@@ -65,53 +65,53 @@
 #define OMAP3430_VPVOLTAGE_MASK				(0xff << 0)
 
 /* PRM_VP1_STATUS, PRM_VP2_STATUS shared bits */
-#define OMAP3430_VPINIDLE				(1 << 0)
+#define OMAP3430_VPINIDLE_MASK				(1 << 0)
 
 /* PM_WKDEP_IVA2, PM_WKDEP_MPU shared bits */
 #define OMAP3430_EN_PER_SHIFT				7
 #define OMAP3430_EN_PER_MASK				(1 << 7)
 
 /* PM_PWSTCTRL_IVA2, PM_PWSTCTRL_MPU, PM_PWSTCTRL_CORE shared bits */
-#define OMAP3430_MEMORYCHANGE				(1 << 3)
+#define OMAP3430_MEMORYCHANGE_MASK			(1 << 3)
 
 /* PM_PWSTST_IVA2, PM_PWSTST_CORE shared bits */
-#define OMAP3430_LOGICSTATEST				(1 << 2)
+#define OMAP3430_LOGICSTATEST_MASK			(1 << 2)
 
 /* PM_PREPWSTST_IVA2, PM_PREPWSTST_CORE shared bits */
-#define OMAP3430_LASTLOGICSTATEENTERED			(1 << 2)
+#define OMAP3430_LASTLOGICSTATEENTERED_MASK		(1 << 2)
 
 /*
  * PM_PREPWSTST_IVA2, PM_PREPWSTST_MPU, PM_PREPWSTST_CORE,
  * PM_PREPWSTST_GFX, PM_PREPWSTST_DSS, PM_PREPWSTST_CAM,
  * PM_PREPWSTST_PER, PM_PREPWSTST_NEON shared bits
  */
-#define OMAP3430_LASTPOWERSTATEENTERED_SHIFT			0
-#define OMAP3430_LASTPOWERSTATEENTERED_MASK			(0x3 << 0)
+#define OMAP3430_LASTPOWERSTATEENTERED_SHIFT		0
+#define OMAP3430_LASTPOWERSTATEENTERED_MASK		(0x3 << 0)
 
 /* PRM_IRQSTATUS_IVA2, PRM_IRQSTATUS_MPU shared bits */
-#define OMAP3430_WKUP_ST				(1 << 0)
+#define OMAP3430_WKUP_ST_MASK				(1 << 0)
 
 /* PRM_IRQENABLE_IVA2, PRM_IRQENABLE_MPU shared bits */
-#define OMAP3430_WKUP_EN					(1 << 0)
+#define OMAP3430_WKUP_EN_MASK				(1 << 0)
 
 /* PM_MPUGRPSEL1_CORE, PM_IVA2GRPSEL1_CORE shared bits */
-#define OMAP3430_GRPSEL_MMC2				(1 << 25)
-#define OMAP3430_GRPSEL_MMC1				(1 << 24)
-#define OMAP3430_GRPSEL_MCSPI4				(1 << 21)
-#define OMAP3430_GRPSEL_MCSPI3				(1 << 20)
-#define OMAP3430_GRPSEL_MCSPI2				(1 << 19)
-#define OMAP3430_GRPSEL_MCSPI1				(1 << 18)
-#define OMAP3430_GRPSEL_I2C3				(1 << 17)
-#define OMAP3430_GRPSEL_I2C2				(1 << 16)
-#define OMAP3430_GRPSEL_I2C1				(1 << 15)
-#define OMAP3430_GRPSEL_UART2				(1 << 14)
-#define OMAP3430_GRPSEL_UART1				(1 << 13)
-#define OMAP3430_GRPSEL_GPT11				(1 << 12)
-#define OMAP3430_GRPSEL_GPT10				(1 << 11)
-#define OMAP3430_GRPSEL_MCBSP5				(1 << 10)
-#define OMAP3430_GRPSEL_MCBSP1				(1 << 9)
-#define OMAP3430_GRPSEL_HSOTGUSB			(1 << 4)
-#define OMAP3430_GRPSEL_D2D				(1 << 3)
+#define OMAP3430_GRPSEL_MMC2_MASK			(1 << 25)
+#define OMAP3430_GRPSEL_MMC1_MASK			(1 << 24)
+#define OMAP3430_GRPSEL_MCSPI4_MASK			(1 << 21)
+#define OMAP3430_GRPSEL_MCSPI3_MASK			(1 << 20)
+#define OMAP3430_GRPSEL_MCSPI2_MASK			(1 << 19)
+#define OMAP3430_GRPSEL_MCSPI1_MASK			(1 << 18)
+#define OMAP3430_GRPSEL_I2C3_MASK			(1 << 17)
+#define OMAP3430_GRPSEL_I2C2_MASK			(1 << 16)
+#define OMAP3430_GRPSEL_I2C1_MASK			(1 << 15)
+#define OMAP3430_GRPSEL_UART2_MASK			(1 << 14)
+#define OMAP3430_GRPSEL_UART1_MASK			(1 << 13)
+#define OMAP3430_GRPSEL_GPT11_MASK			(1 << 12)
+#define OMAP3430_GRPSEL_GPT10_MASK			(1 << 11)
+#define OMAP3430_GRPSEL_MCBSP5_MASK			(1 << 10)
+#define OMAP3430_GRPSEL_MCBSP1_MASK			(1 << 9)
+#define OMAP3430_GRPSEL_HSOTGUSB_MASK			(1 << 4)
+#define OMAP3430_GRPSEL_D2D_MASK			(1 << 3)
 
 /*
  * PM_PWSTCTRL_GFX, PM_PWSTCTRL_DSS, PM_PWSTCTRL_CAM,
@@ -119,49 +119,49 @@
  */
 #define OMAP3430_MEMONSTATE_SHIFT			16
 #define OMAP3430_MEMONSTATE_MASK			(0x3 << 16)
-#define OMAP3430_MEMRETSTATE				(1 << 8)
+#define OMAP3430_MEMRETSTATE_MASK			(1 << 8)
 
 /* PM_MPUGRPSEL_PER, PM_IVA2GRPSEL_PER shared bits */
-#define OMAP3430_GRPSEL_GPIO6				(1 << 17)
-#define OMAP3430_GRPSEL_GPIO5				(1 << 16)
-#define OMAP3430_GRPSEL_GPIO4				(1 << 15)
-#define OMAP3430_GRPSEL_GPIO3				(1 << 14)
-#define OMAP3430_GRPSEL_GPIO2				(1 << 13)
-#define OMAP3430_GRPSEL_UART3				(1 << 11)
-#define OMAP3430_GRPSEL_GPT9				(1 << 10)
-#define OMAP3430_GRPSEL_GPT8				(1 << 9)
-#define OMAP3430_GRPSEL_GPT7				(1 << 8)
-#define OMAP3430_GRPSEL_GPT6				(1 << 7)
-#define OMAP3430_GRPSEL_GPT5				(1 << 6)
-#define OMAP3430_GRPSEL_GPT4				(1 << 5)
-#define OMAP3430_GRPSEL_GPT3				(1 << 4)
-#define OMAP3430_GRPSEL_GPT2				(1 << 3)
-#define OMAP3430_GRPSEL_MCBSP4				(1 << 2)
-#define OMAP3430_GRPSEL_MCBSP3				(1 << 1)
-#define OMAP3430_GRPSEL_MCBSP2				(1 << 0)
+#define OMAP3430_GRPSEL_GPIO6_MASK			(1 << 17)
+#define OMAP3430_GRPSEL_GPIO5_MASK			(1 << 16)
+#define OMAP3430_GRPSEL_GPIO4_MASK			(1 << 15)
+#define OMAP3430_GRPSEL_GPIO3_MASK			(1 << 14)
+#define OMAP3430_GRPSEL_GPIO2_MASK			(1 << 13)
+#define OMAP3430_GRPSEL_UART3_MASK			(1 << 11)
+#define OMAP3430_GRPSEL_GPT9_MASK			(1 << 10)
+#define OMAP3430_GRPSEL_GPT8_MASK			(1 << 9)
+#define OMAP3430_GRPSEL_GPT7_MASK			(1 << 8)
+#define OMAP3430_GRPSEL_GPT6_MASK			(1 << 7)
+#define OMAP3430_GRPSEL_GPT5_MASK			(1 << 6)
+#define OMAP3430_GRPSEL_GPT4_MASK			(1 << 5)
+#define OMAP3430_GRPSEL_GPT3_MASK			(1 << 4)
+#define OMAP3430_GRPSEL_GPT2_MASK			(1 << 3)
+#define OMAP3430_GRPSEL_MCBSP4_MASK			(1 << 2)
+#define OMAP3430_GRPSEL_MCBSP3_MASK			(1 << 1)
+#define OMAP3430_GRPSEL_MCBSP2_MASK			(1 << 0)
 
 /* PM_MPUGRPSEL_WKUP, PM_IVA2GRPSEL_WKUP shared bits */
-#define OMAP3430_GRPSEL_IO				(1 << 8)
-#define OMAP3430_GRPSEL_SR2				(1 << 7)
-#define OMAP3430_GRPSEL_SR1				(1 << 6)
-#define OMAP3430_GRPSEL_GPIO1				(1 << 3)
-#define OMAP3430_GRPSEL_GPT12				(1 << 1)
-#define OMAP3430_GRPSEL_GPT1				(1 << 0)
+#define OMAP3430_GRPSEL_IO_MASK				(1 << 8)
+#define OMAP3430_GRPSEL_SR2_MASK			(1 << 7)
+#define OMAP3430_GRPSEL_SR1_MASK			(1 << 6)
+#define OMAP3430_GRPSEL_GPIO1_MASK			(1 << 3)
+#define OMAP3430_GRPSEL_GPT12_MASK			(1 << 1)
+#define OMAP3430_GRPSEL_GPT1_MASK			(1 << 0)
 
 /* Bits specific to each register */
 
 /* RM_RSTCTRL_IVA2 */
-#define OMAP3430_RST3_IVA2				(1 << 2)
-#define OMAP3430_RST2_IVA2				(1 << 1)
-#define OMAP3430_RST1_IVA2				(1 << 0)
+#define OMAP3430_RST3_IVA2_MASK				(1 << 2)
+#define OMAP3430_RST2_IVA2_MASK				(1 << 1)
+#define OMAP3430_RST1_IVA2_MASK				(1 << 0)
 
 /* RM_RSTST_IVA2 specific bits */
-#define OMAP3430_EMULATION_VSEQ_RST			(1 << 13)
-#define OMAP3430_EMULATION_VHWA_RST			(1 << 12)
-#define OMAP3430_EMULATION_IVA2_RST			(1 << 11)
-#define OMAP3430_IVA2_SW_RST3				(1 << 10)
-#define OMAP3430_IVA2_SW_RST2				(1 << 9)
-#define OMAP3430_IVA2_SW_RST1				(1 << 8)
+#define OMAP3430_EMULATION_VSEQ_RST_MASK		(1 << 13)
+#define OMAP3430_EMULATION_VHWA_RST_MASK		(1 << 12)
+#define OMAP3430_EMULATION_IVA2_RST_MASK		(1 << 11)
+#define OMAP3430_IVA2_SW_RST3_MASK			(1 << 10)
+#define OMAP3430_IVA2_SW_RST2_MASK			(1 << 9)
+#define OMAP3430_IVA2_SW_RST1_MASK			(1 << 8)
 
 /* PM_WKDEP_IVA2 specific bits */
 
@@ -174,10 +174,10 @@
 #define OMAP3430_L1FLATMEMONSTATE_MASK			(0x3 << 18)
 #define OMAP3430_SHAREDL1CACHEFLATONSTATE_SHIFT		16
 #define OMAP3430_SHAREDL1CACHEFLATONSTATE_MASK		(0x3 << 16)
-#define OMAP3430_L2FLATMEMRETSTATE			(1 << 11)
-#define OMAP3430_SHAREDL2CACHEFLATRETSTATE		(1 << 10)
-#define OMAP3430_L1FLATMEMRETSTATE			(1 << 9)
-#define OMAP3430_SHAREDL1CACHEFLATRETSTATE		(1 << 8)
+#define OMAP3430_L2FLATMEMRETSTATE_MASK			(1 << 11)
+#define OMAP3430_SHAREDL2CACHEFLATRETSTATE_MASK		(1 << 10)
+#define OMAP3430_L1FLATMEMRETSTATE_MASK			(1 << 9)
+#define OMAP3430_SHAREDL1CACHEFLATRETSTATE_MASK		(1 << 8)
 
 /* PM_PWSTST_IVA2 specific bits */
 #define OMAP3430_L2FLATMEMSTATEST_SHIFT			10
@@ -200,12 +200,12 @@
 #define OMAP3430_LASTSHAREDL1CACHEFLATSTATEENTERED_MASK		(0x3 << 4)
 
 /* PRM_IRQSTATUS_IVA2 specific bits */
-#define OMAP3430_PRM_IRQSTATUS_IVA2_IVA2_DPLL_ST	(1 << 2)
-#define OMAP3430_FORCEWKUP_ST				(1 << 1)
+#define OMAP3430_PRM_IRQSTATUS_IVA2_IVA2_DPLL_ST_MASK	(1 << 2)
+#define OMAP3430_FORCEWKUP_ST_MASK			(1 << 1)
 
 /* PRM_IRQENABLE_IVA2 specific bits */
-#define OMAP3430_PRM_IRQENABLE_IVA2_IVA2_DPLL_RECAL_EN		(1 << 2)
-#define OMAP3430_FORCEWKUP_EN					(1 << 1)
+#define OMAP3430_PRM_IRQENABLE_IVA2_IVA2_DPLL_RECAL_EN_MASK	(1 << 2)
+#define OMAP3430_FORCEWKUP_EN_MASK				(1 << 1)
 
 /* PRM_REVISION specific bits */
 
@@ -213,70 +213,70 @@
 
 /* PRM_IRQSTATUS_MPU specific bits */
 #define OMAP3430ES2_SND_PERIPH_DPLL_ST_SHIFT		25
-#define OMAP3430ES2_SND_PERIPH_DPLL_ST			(1 << 25)
-#define OMAP3430_VC_TIMEOUTERR_ST			(1 << 24)
-#define OMAP3430_VC_RAERR_ST				(1 << 23)
-#define OMAP3430_VC_SAERR_ST				(1 << 22)
-#define OMAP3430_VP2_TRANXDONE_ST			(1 << 21)
-#define OMAP3430_VP2_EQVALUE_ST				(1 << 20)
-#define OMAP3430_VP2_NOSMPSACK_ST			(1 << 19)
-#define OMAP3430_VP2_MAXVDD_ST				(1 << 18)
-#define OMAP3430_VP2_MINVDD_ST				(1 << 17)
-#define OMAP3430_VP2_OPPCHANGEDONE_ST			(1 << 16)
-#define OMAP3430_VP1_TRANXDONE_ST			(1 << 15)
-#define OMAP3430_VP1_EQVALUE_ST				(1 << 14)
-#define OMAP3430_VP1_NOSMPSACK_ST			(1 << 13)
-#define OMAP3430_VP1_MAXVDD_ST				(1 << 12)
-#define OMAP3430_VP1_MINVDD_ST				(1 << 11)
-#define OMAP3430_VP1_OPPCHANGEDONE_ST			(1 << 10)
-#define OMAP3430_IO_ST					(1 << 9)
-#define OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST		(1 << 8)
+#define OMAP3430ES2_SND_PERIPH_DPLL_ST_MASK		(1 << 25)
+#define OMAP3430_VC_TIMEOUTERR_ST_MASK			(1 << 24)
+#define OMAP3430_VC_RAERR_ST_MASK			(1 << 23)
+#define OMAP3430_VC_SAERR_ST_MASK			(1 << 22)
+#define OMAP3430_VP2_TRANXDONE_ST_MASK			(1 << 21)
+#define OMAP3430_VP2_EQVALUE_ST_MASK			(1 << 20)
+#define OMAP3430_VP2_NOSMPSACK_ST_MASK			(1 << 19)
+#define OMAP3430_VP2_MAXVDD_ST_MASK			(1 << 18)
+#define OMAP3430_VP2_MINVDD_ST_MASK			(1 << 17)
+#define OMAP3430_VP2_OPPCHANGEDONE_ST_MASK		(1 << 16)
+#define OMAP3430_VP1_TRANXDONE_ST_MASK			(1 << 15)
+#define OMAP3430_VP1_EQVALUE_ST_MASK			(1 << 14)
+#define OMAP3430_VP1_NOSMPSACK_ST_MASK			(1 << 13)
+#define OMAP3430_VP1_MAXVDD_ST_MASK			(1 << 12)
+#define OMAP3430_VP1_MINVDD_ST_MASK			(1 << 11)
+#define OMAP3430_VP1_OPPCHANGEDONE_ST_MASK		(1 << 10)
+#define OMAP3430_IO_ST_MASK				(1 << 9)
+#define OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST_MASK	(1 << 8)
 #define OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST_SHIFT	8
-#define OMAP3430_MPU_DPLL_ST				(1 << 7)
+#define OMAP3430_MPU_DPLL_ST_MASK			(1 << 7)
 #define OMAP3430_MPU_DPLL_ST_SHIFT			7
-#define OMAP3430_PERIPH_DPLL_ST				(1 << 6)
+#define OMAP3430_PERIPH_DPLL_ST_MASK			(1 << 6)
 #define OMAP3430_PERIPH_DPLL_ST_SHIFT			6
-#define OMAP3430_CORE_DPLL_ST				(1 << 5)
+#define OMAP3430_CORE_DPLL_ST_MASK			(1 << 5)
 #define OMAP3430_CORE_DPLL_ST_SHIFT			5
-#define OMAP3430_TRANSITION_ST				(1 << 4)
-#define OMAP3430_EVGENOFF_ST				(1 << 3)
-#define OMAP3430_EVGENON_ST				(1 << 2)
-#define OMAP3430_FS_USB_WKUP_ST				(1 << 1)
+#define OMAP3430_TRANSITION_ST_MASK			(1 << 4)
+#define OMAP3430_EVGENOFF_ST_MASK			(1 << 3)
+#define OMAP3430_EVGENON_ST_MASK			(1 << 2)
+#define OMAP3430_FS_USB_WKUP_ST_MASK			(1 << 1)
 
 /* PRM_IRQENABLE_MPU specific bits */
 #define OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN_SHIFT		25
-#define OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN			(1 << 25)
-#define OMAP3430_VC_TIMEOUTERR_EN				(1 << 24)
-#define OMAP3430_VC_RAERR_EN					(1 << 23)
-#define OMAP3430_VC_SAERR_EN					(1 << 22)
-#define OMAP3430_VP2_TRANXDONE_EN				(1 << 21)
-#define OMAP3430_VP2_EQVALUE_EN					(1 << 20)
-#define OMAP3430_VP2_NOSMPSACK_EN				(1 << 19)
-#define OMAP3430_VP2_MAXVDD_EN					(1 << 18)
-#define OMAP3430_VP2_MINVDD_EN					(1 << 17)
-#define OMAP3430_VP2_OPPCHANGEDONE_EN				(1 << 16)
-#define OMAP3430_VP1_TRANXDONE_EN				(1 << 15)
-#define OMAP3430_VP1_EQVALUE_EN					(1 << 14)
-#define OMAP3430_VP1_NOSMPSACK_EN				(1 << 13)
-#define OMAP3430_VP1_MAXVDD_EN					(1 << 12)
-#define OMAP3430_VP1_MINVDD_EN					(1 << 11)
-#define OMAP3430_VP1_OPPCHANGEDONE_EN				(1 << 10)
-#define OMAP3430_IO_EN						(1 << 9)
-#define OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN		(1 << 8)
+#define OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN_MASK		(1 << 25)
+#define OMAP3430_VC_TIMEOUTERR_EN_MASK				(1 << 24)
+#define OMAP3430_VC_RAERR_EN_MASK				(1 << 23)
+#define OMAP3430_VC_SAERR_EN_MASK				(1 << 22)
+#define OMAP3430_VP2_TRANXDONE_EN_MASK				(1 << 21)
+#define OMAP3430_VP2_EQVALUE_EN_MASK				(1 << 20)
+#define OMAP3430_VP2_NOSMPSACK_EN_MASK				(1 << 19)
+#define OMAP3430_VP2_MAXVDD_EN_MASK				(1 << 18)
+#define OMAP3430_VP2_MINVDD_EN_MASK				(1 << 17)
+#define OMAP3430_VP2_OPPCHANGEDONE_EN_MASK			(1 << 16)
+#define OMAP3430_VP1_TRANXDONE_EN_MASK				(1 << 15)
+#define OMAP3430_VP1_EQVALUE_EN_MASK				(1 << 14)
+#define OMAP3430_VP1_NOSMPSACK_EN_MASK				(1 << 13)
+#define OMAP3430_VP1_MAXVDD_EN_MASK				(1 << 12)
+#define OMAP3430_VP1_MINVDD_EN_MASK				(1 << 11)
+#define OMAP3430_VP1_OPPCHANGEDONE_EN_MASK			(1 << 10)
+#define OMAP3430_IO_EN_MASK					(1 << 9)
+#define OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN_MASK	(1 << 8)
 #define OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN_SHIFT	8
-#define OMAP3430_MPU_DPLL_RECAL_EN				(1 << 7)
+#define OMAP3430_MPU_DPLL_RECAL_EN_MASK				(1 << 7)
 #define OMAP3430_MPU_DPLL_RECAL_EN_SHIFT			7
-#define OMAP3430_PERIPH_DPLL_RECAL_EN				(1 << 6)
+#define OMAP3430_PERIPH_DPLL_RECAL_EN_MASK			(1 << 6)
 #define OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT			6
-#define OMAP3430_CORE_DPLL_RECAL_EN				(1 << 5)
+#define OMAP3430_CORE_DPLL_RECAL_EN_MASK			(1 << 5)
 #define OMAP3430_CORE_DPLL_RECAL_EN_SHIFT			5
-#define OMAP3430_TRANSITION_EN					(1 << 4)
-#define OMAP3430_EVGENOFF_EN					(1 << 3)
-#define OMAP3430_EVGENON_EN					(1 << 2)
-#define OMAP3430_FS_USB_WKUP_EN					(1 << 1)
+#define OMAP3430_TRANSITION_EN_MASK				(1 << 4)
+#define OMAP3430_EVGENOFF_EN_MASK				(1 << 3)
+#define OMAP3430_EVGENON_EN_MASK				(1 << 2)
+#define OMAP3430_FS_USB_WKUP_EN_MASK				(1 << 1)
 
 /* RM_RSTST_MPU specific bits */
-#define OMAP3430_EMULATION_MPU_RST			(1 << 11)
+#define OMAP3430_EMULATION_MPU_RST_MASK			(1 << 11)
 
 /* PM_WKDEP_MPU specific bits */
 #define OMAP3430_PM_WKDEP_MPU_EN_DSS_SHIFT		5
@@ -289,7 +289,7 @@
 #define OMAP3430_OFFLOADMODE_MASK			(0x3 << 3)
 #define OMAP3430_ONLOADMODE_SHIFT			1
 #define OMAP3430_ONLOADMODE_MASK			(0x3 << 1)
-#define OMAP3430_ENABLE					(1 << 0)
+#define OMAP3430_ENABLE_MASK				(1 << 0)
 
 /* PM_EVGENONTIM_MPU */
 #define OMAP3430_ONTIMEVAL_SHIFT			0
@@ -302,32 +302,32 @@
 /* PM_PWSTCTRL_MPU specific bits */
 #define OMAP3430_L2CACHEONSTATE_SHIFT			16
 #define OMAP3430_L2CACHEONSTATE_MASK			(0x3 << 16)
-#define OMAP3430_L2CACHERETSTATE			(1 << 8)
-#define OMAP3430_LOGICL1CACHERETSTATE			(1 << 2)
+#define OMAP3430_L2CACHERETSTATE_MASK			(1 << 8)
+#define OMAP3430_LOGICL1CACHERETSTATE_MASK		(1 << 2)
 
 /* PM_PWSTST_MPU specific bits */
 #define OMAP3430_L2CACHESTATEST_SHIFT			6
 #define OMAP3430_L2CACHESTATEST_MASK			(0x3 << 6)
-#define OMAP3430_LOGICL1CACHESTATEST			(1 << 2)
+#define OMAP3430_LOGICL1CACHESTATEST_MASK		(1 << 2)
 
 /* PM_PREPWSTST_MPU specific bits */
 #define OMAP3430_LASTL2CACHESTATEENTERED_SHIFT		6
 #define OMAP3430_LASTL2CACHESTATEENTERED_MASK		(0x3 << 6)
-#define OMAP3430_LASTLOGICL1CACHESTATEENTERED		(1 << 2)
+#define OMAP3430_LASTLOGICL1CACHESTATEENTERED_MASK	(1 << 2)
 
 /* RM_RSTCTRL_CORE */
-#define OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON		(1 << 1)
-#define OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST			(1 << 0)
+#define OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK		(1 << 1)
+#define OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK		(1 << 0)
 
 /* RM_RSTST_CORE specific bits */
-#define OMAP3430_MODEM_SECURITY_VIOL_RST		(1 << 10)
-#define OMAP3430_RM_RSTST_CORE_MODEM_SW_RSTPWRON	(1 << 9)
-#define OMAP3430_RM_RSTST_CORE_MODEM_SW_RST		(1 << 8)
+#define OMAP3430_MODEM_SECURITY_VIOL_RST_MASK		(1 << 10)
+#define OMAP3430_RM_RSTST_CORE_MODEM_SW_RSTPWRON_MASK	(1 << 9)
+#define OMAP3430_RM_RSTST_CORE_MODEM_SW_RST_MASK	(1 << 8)
 
 /* PM_WKEN1_CORE specific bits */
 
 /* PM_MPUGRPSEL1_CORE specific bits */
-#define OMAP3430_GRPSEL_FSHOSTUSB			(1 << 5)
+#define OMAP3430_GRPSEL_FSHOSTUSB_MASK			(1 << 5)
 
 /* PM_IVA2GRPSEL1_CORE specific bits */
 
@@ -338,8 +338,8 @@
 #define OMAP3430_MEM2ONSTATE_MASK			(0x3 << 18)
 #define OMAP3430_MEM1ONSTATE_SHIFT			16
 #define OMAP3430_MEM1ONSTATE_MASK			(0x3 << 16)
-#define OMAP3430_MEM2RETSTATE				(1 << 9)
-#define OMAP3430_MEM1RETSTATE				(1 << 8)
+#define OMAP3430_MEM2RETSTATE_MASK			(1 << 9)
+#define OMAP3430_MEM1RETSTATE_MASK			(1 << 8)
 
 /* PM_PWSTST_CORE specific bits */
 #define OMAP3430_MEM2STATEST_SHIFT			6
@@ -356,7 +356,7 @@
 /* RM_RSTST_GFX specific bits */
 
 /* PM_WKDEP_GFX specific bits */
-#define OMAP3430_PM_WKDEP_GFX_EN_IVA2			(1 << 2)
+#define OMAP3430_PM_WKDEP_GFX_EN_IVA2_MASK		(1 << 2)
 
 /* PM_PWSTCTRL_GFX specific bits */
 
@@ -365,33 +365,33 @@
 /* PM_PREPWSTST_GFX specific bits */
 
 /* PM_WKEN_WKUP specific bits */
-#define OMAP3430_EN_IO_CHAIN				(1 << 16)
-#define OMAP3430_EN_IO					(1 << 8)
-#define OMAP3430_EN_GPIO1				(1 << 3)
+#define OMAP3430_EN_IO_CHAIN_MASK			(1 << 16)
+#define OMAP3430_EN_IO_MASK				(1 << 8)
+#define OMAP3430_EN_GPIO1_MASK				(1 << 3)
 
 /* PM_MPUGRPSEL_WKUP specific bits */
 
 /* PM_IVA2GRPSEL_WKUP specific bits */
 
 /* PM_WKST_WKUP specific bits */
-#define OMAP3430_ST_IO_CHAIN				(1 << 16)
-#define OMAP3430_ST_IO					(1 << 8)
+#define OMAP3430_ST_IO_CHAIN_MASK			(1 << 16)
+#define OMAP3430_ST_IO_MASK				(1 << 8)
 
 /* PRM_CLKSEL */
 #define OMAP3430_SYS_CLKIN_SEL_SHIFT			0
 #define OMAP3430_SYS_CLKIN_SEL_MASK			(0x7 << 0)
 
 /* PRM_CLKOUT_CTRL */
-#define OMAP3430_CLKOUT_EN				(1 << 7)
+#define OMAP3430_CLKOUT_EN_MASK				(1 << 7)
 #define OMAP3430_CLKOUT_EN_SHIFT			7
 
 /* RM_RSTST_DSS specific bits */
 
 /* PM_WKEN_DSS */
-#define OMAP3430_PM_WKEN_DSS_EN_DSS			(1 << 0)
+#define OMAP3430_PM_WKEN_DSS_EN_DSS_MASK		(1 << 0)
 
 /* PM_WKDEP_DSS specific bits */
-#define OMAP3430_PM_WKDEP_DSS_EN_IVA2			(1 << 2)
+#define OMAP3430_PM_WKDEP_DSS_EN_IVA2_MASK		(1 << 2)
 
 /* PM_PWSTCTRL_DSS specific bits */
 
@@ -402,7 +402,7 @@
 /* RM_RSTST_CAM specific bits */
 
 /* PM_WKDEP_CAM specific bits */
-#define OMAP3430_PM_WKDEP_CAM_EN_IVA2			(1 << 2)
+#define OMAP3430_PM_WKDEP_CAM_EN_IVA2_MASK		(1 << 2)
 
 /* PM_PWSTCTRL_CAM specific bits */
 
@@ -424,7 +424,7 @@
 /* PM_WKST_PER specific bits */
 
 /* PM_WKDEP_PER specific bits */
-#define OMAP3430_PM_WKDEP_PER_EN_IVA2			(1 << 2)
+#define OMAP3430_PM_WKDEP_PER_EN_IVA2_MASK		(1 << 2)
 
 /* PM_PWSTCTRL_PER specific bits */
 
@@ -467,26 +467,26 @@
 /* PRM_VC_CMD_VAL_1 specific bits */
 
 /* PRM_VC_CH_CONF */
-#define OMAP3430_CMD1					(1 << 20)
-#define OMAP3430_RACEN1					(1 << 19)
-#define OMAP3430_RAC1					(1 << 18)
-#define OMAP3430_RAV1					(1 << 17)
-#define OMAP3430_PRM_VC_CH_CONF_SA1			(1 << 16)
-#define OMAP3430_CMD0					(1 << 4)
-#define OMAP3430_RACEN0					(1 << 3)
-#define OMAP3430_RAC0					(1 << 2)
-#define OMAP3430_RAV0					(1 << 1)
-#define OMAP3430_PRM_VC_CH_CONF_SA0			(1 << 0)
+#define OMAP3430_CMD1_MASK				(1 << 20)
+#define OMAP3430_RACEN1_MASK				(1 << 19)
+#define OMAP3430_RAC1_MASK				(1 << 18)
+#define OMAP3430_RAV1_MASK				(1 << 17)
+#define OMAP3430_PRM_VC_CH_CONF_SA1_MASK		(1 << 16)
+#define OMAP3430_CMD0_MASK				(1 << 4)
+#define OMAP3430_RACEN0_MASK				(1 << 3)
+#define OMAP3430_RAC0_MASK				(1 << 2)
+#define OMAP3430_RAV0_MASK				(1 << 1)
+#define OMAP3430_PRM_VC_CH_CONF_SA0_MASK		(1 << 0)
 
 /* PRM_VC_I2C_CFG */
-#define OMAP3430_HSMASTER				(1 << 5)
-#define OMAP3430_SREN					(1 << 4)
-#define OMAP3430_HSEN					(1 << 3)
+#define OMAP3430_HSMASTER_MASK				(1 << 5)
+#define OMAP3430_SREN_MASK				(1 << 4)
+#define OMAP3430_HSEN_MASK				(1 << 3)
 #define OMAP3430_MCODE_SHIFT				0
 #define OMAP3430_MCODE_MASK				(0x7 << 0)
 
 /* PRM_VC_BYPASS_VAL */
-#define OMAP3430_VALID					(1 << 24)
+#define OMAP3430_VALID_MASK				(1 << 24)
 #define OMAP3430_DATA_SHIFT				16
 #define OMAP3430_DATA_MASK				(0xff << 16)
 #define OMAP3430_REGADDR_SHIFT				8
@@ -495,8 +495,8 @@
 #define OMAP3430_SLAVEADDR_MASK				(0x7f << 0)
 
 /* PRM_RSTCTRL */
-#define OMAP3430_RST_DPLL3				(1 << 2)
-#define OMAP3430_RST_GS					(1 << 1)
+#define OMAP3430_RST_DPLL3_MASK				(1 << 2)
+#define OMAP3430_RST_GS_MASK				(1 << 1)
 
 /* PRM_RSTTIME */
 #define OMAP3430_RSTTIME2_SHIFT				8
@@ -505,23 +505,23 @@
 #define OMAP3430_RSTTIME1_MASK				(0xff << 0)
 
 /* PRM_RSTST */
-#define OMAP3430_ICECRUSHER_RST				(1 << 10)
-#define OMAP3430_ICEPICK_RST				(1 << 9)
-#define OMAP3430_VDD2_VOLTAGE_MANAGER_RST		(1 << 8)
-#define OMAP3430_VDD1_VOLTAGE_MANAGER_RST		(1 << 7)
-#define OMAP3430_EXTERNAL_WARM_RST			(1 << 6)
-#define OMAP3430_SECURE_WD_RST				(1 << 5)
-#define OMAP3430_MPU_WD_RST				(1 << 4)
-#define OMAP3430_SECURITY_VIOL_RST			(1 << 3)
-#define OMAP3430_GLOBAL_SW_RST				(1 << 1)
-#define OMAP3430_GLOBAL_COLD_RST			(1 << 0)
+#define OMAP3430_ICECRUSHER_RST_MASK			(1 << 10)
+#define OMAP3430_ICEPICK_RST_MASK			(1 << 9)
+#define OMAP3430_VDD2_VOLTAGE_MANAGER_RST_MASK		(1 << 8)
+#define OMAP3430_VDD1_VOLTAGE_MANAGER_RST_MASK		(1 << 7)
+#define OMAP3430_EXTERNAL_WARM_RST_MASK			(1 << 6)
+#define OMAP3430_SECURE_WD_RST_MASK			(1 << 5)
+#define OMAP3430_MPU_WD_RST_MASK			(1 << 4)
+#define OMAP3430_SECURITY_VIOL_RST_MASK			(1 << 3)
+#define OMAP3430_GLOBAL_SW_RST_MASK			(1 << 1)
+#define OMAP3430_GLOBAL_COLD_RST_MASK			(1 << 0)
 
 /* PRM_VOLTCTRL */
-#define OMAP3430_SEL_VMODE				(1 << 4)
-#define OMAP3430_SEL_OFF				(1 << 3)
-#define OMAP3430_AUTO_OFF				(1 << 2)
-#define OMAP3430_AUTO_RET				(1 << 1)
-#define OMAP3430_AUTO_SLEEP				(1 << 0)
+#define OMAP3430_SEL_VMODE_MASK				(1 << 4)
+#define OMAP3430_SEL_OFF_MASK				(1 << 3)
+#define OMAP3430_AUTO_OFF_MASK				(1 << 2)
+#define OMAP3430_AUTO_RET_MASK				(1 << 1)
+#define OMAP3430_AUTO_SLEEP_MASK			(1 << 0)
 
 /* PRM_SRAM_PCHARGE */
 #define OMAP3430_PCHARGE_TIME_SHIFT			0
@@ -550,10 +550,10 @@
 #define OMAP3430_SETUP_TIME_MASK			(0xffff << 0)
 
 /* PRM_POLCTRL */
-#define OMAP3430_OFFMODE_POL				(1 << 3)
-#define OMAP3430_CLKOUT_POL				(1 << 2)
-#define OMAP3430_CLKREQ_POL				(1 << 1)
-#define OMAP3430_EXTVOL_POL				(1 << 0)
+#define OMAP3430_OFFMODE_POL_MASK			(1 << 3)
+#define OMAP3430_CLKOUT_POL_MASK			(1 << 2)
+#define OMAP3430_CLKREQ_POL_MASK			(1 << 1)
+#define OMAP3430_EXTVOL_POL_MASK			(1 << 0)
 
 /* PRM_VOLTSETUP2 */
 #define OMAP3430_OFFMODESETUPTIME_SHIFT			0
diff --git a/arch/arm/mach-omap2/prm.h b/arch/arm/mach-omap2/prm.h
index 5fba2aa..588873b 100644
--- a/arch/arm/mach-omap2/prm.h
+++ b/arch/arm/mach-omap2/prm.h
@@ -24,8 +24,8 @@
 		OMAP2_L4_IO_ADDRESS(OMAP3430_PRM_BASE + (module) + (reg))
 #define OMAP44XX_PRM_REGADDR(module, reg)				\
 		OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE + (module) + (reg))
-#define OMAP44XX_CHIRONSS_REGADDR(module, reg)				\
-		OMAP2_L4_IO_ADDRESS(OMAP4430_CHIRONSS_BASE + (module) + (reg))
+#define OMAP44XX_PRCM_MPU_REGADDR(module, reg)				\
+		OMAP2_L4_IO_ADDRESS(OMAP4430_PRCM_MPU_BASE + (module) + (reg))
 
 #include "prm44xx.h"
 
@@ -284,7 +284,7 @@
 #define OMAP_OFFLOADMODE_MASK				(0x3 << 3)
 #define OMAP_ONLOADMODE_SHIFT				1
 #define OMAP_ONLOADMODE_MASK				(0x3 << 1)
-#define OMAP_ENABLE					(1 << 0)
+#define OMAP_ENABLE_MASK				(1 << 0)
 
 /* PRM_RSTTIME */
 /* Named RM_RSTTIME_WKUP on the 24xx */
@@ -296,8 +296,8 @@
 /* PRM_RSTCTRL */
 /* Named RM_RSTCTRL_WKUP on the 24xx */
 /* 2420 calls RST_DPLL3 'RST_DPLL' */
-#define OMAP_RST_DPLL3					(1 << 2)
-#define OMAP_RST_GS					(1 << 1)
+#define OMAP_RST_DPLL3_MASK				(1 << 2)
+#define OMAP_RST_GS_MASK				(1 << 1)
 
 
 /*
@@ -316,7 +316,7 @@
  *	 PM_PWSTST_DSS, PM_PWSTST_CAM, PM_PWSTST_PER, PM_PWSTST_EMU,
  *	 PM_PWSTST_NEON
  */
-#define OMAP_INTRANSITION				(1 << 20)
+#define OMAP_INTRANSITION_MASK				(1 << 20)
 
 
 /*
@@ -338,7 +338,7 @@
  * 3430: RM_RSTST_IVA2, RM_RSTST_MPU, RM_RSTST_GFX, RM_RSTST_DSS,
  *	 RM_RSTST_CAM, RM_RSTST_PER, RM_RSTST_NEON
  */
-#define OMAP_COREDOMAINWKUP_RST				(1 << 3)
+#define OMAP_COREDOMAINWKUP_RST_MASK			(1 << 3)
 
 /*
  * 24XX: RM_RSTST_MPU, RM_RSTST_GFX, RM_RSTST_DSP
@@ -347,7 +347,7 @@
  *
  * 3430: RM_RSTST_CORE, RM_RSTST_EMU
  */
-#define OMAP_DOMAINWKUP_RST				(1 << 2)
+#define OMAP_DOMAINWKUP_RST_MASK			(1 << 2)
 
 /*
  * 24XX: RM_RSTST_MPU, RM_RSTST_WKUP, RM_RSTST_DSP
@@ -357,8 +357,8 @@
  *
  * 3430: RM_RSTST_CORE, RM_RSTST_EMU
  */
-#define OMAP_GLOBALWARM_RST				(1 << 1)
-#define OMAP_GLOBALCOLD_RST				(1 << 0)
+#define OMAP_GLOBALWARM_RST_MASK			(1 << 1)
+#define OMAP_GLOBALCOLD_RST_MASK			(1 << 0)
 
 /*
  * 24XX: PM_WKDEP_GFX, PM_WKDEP_MPU, PM_WKDEP_CORE, PM_WKDEP_DSP
@@ -382,7 +382,7 @@
  *	 PM_PWSTCTRL_DSS, PM_PWSTCTRL_CAM, PM_PWSTCTRL_PER,
  *	 PM_PWSTCTRL_NEON
  */
-#define OMAP_LOGICRETSTATE				(1 << 2)
+#define OMAP_LOGICRETSTATE_MASK				(1 << 2)
 
 /*
  * 24XX: PM_PWSTCTRL_MPU, PM_PWSTCTRL_CORE, PM_PWSTCTRL_GFX,
diff --git a/arch/arm/mach-omap2/prm44xx.h b/arch/arm/mach-omap2/prm44xx.h
index adb2558..fe8ef26 100644
--- a/arch/arm/mach-omap2/prm44xx.h
+++ b/arch/arm/mach-omap2/prm44xx.h
@@ -1,8 +1,8 @@
 /*
  * OMAP44xx PRM instance offset macros
  *
- * Copyright (C) 2009 Texas Instruments, Inc.
- * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
  *
  * Paul Walmsley (paul@pwsan.com)
  * Rajendra Nayak (rnayak@ti.com)
@@ -25,387 +25,726 @@
 
 /* PRM */
 
-
 /* PRM.OCP_SOCKET_PRM register offsets */
+#define OMAP4_REVISION_PRM_OFFSET			0x0000
 #define OMAP4430_REVISION_PRM				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0000)
+#define OMAP4_PRM_IRQSTATUS_MPU_OFFSET			0x0010
 #define OMAP4430_PRM_IRQSTATUS_MPU			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0010)
+#define OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET		0x0014
 #define OMAP4430_PRM_IRQSTATUS_MPU_2			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0014)
+#define OMAP4_PRM_IRQENABLE_MPU_OFFSET			0x0018
 #define OMAP4430_PRM_IRQENABLE_MPU			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0018)
+#define OMAP4_PRM_IRQENABLE_MPU_2_OFFSET		0x001c
 #define OMAP4430_PRM_IRQENABLE_MPU_2			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x001c)
+#define OMAP4_PRM_IRQSTATUS_DUCATI_OFFSET		0x0020
 #define OMAP4430_PRM_IRQSTATUS_DUCATI			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0020)
+#define OMAP4_PRM_IRQENABLE_DUCATI_OFFSET		0x0028
 #define OMAP4430_PRM_IRQENABLE_DUCATI			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0028)
+#define OMAP4_PRM_IRQSTATUS_TESLA_OFFSET		0x0030
 #define OMAP4430_PRM_IRQSTATUS_TESLA			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0030)
+#define OMAP4_PRM_IRQENABLE_TESLA_OFFSET		0x0038
 #define OMAP4430_PRM_IRQENABLE_TESLA			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0038)
+#define OMAP4_PRM_PRM_PROFILING_CLKCTRL_OFFSET		0x0040
 #define OMAP4430_PRM_PRM_PROFILING_CLKCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0040)
 
 /* PRM.CKGEN_PRM register offsets */
+#define OMAP4_CM_ABE_DSS_SYS_CLKSEL_OFFSET		0x0000
 #define OMAP4430_CM_ABE_DSS_SYS_CLKSEL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0000)
+#define OMAP4_CM_DPLL_SYS_REF_CLKSEL_OFFSET		0x0004
 #define OMAP4430_CM_DPLL_SYS_REF_CLKSEL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0004)
+#define OMAP4_CM_L4_WKUP_CLKSEL_OFFSET			0x0008
 #define OMAP4430_CM_L4_WKUP_CLKSEL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0008)
+#define OMAP4_CM_ABE_PLL_REF_CLKSEL_OFFSET		0x000c
 #define OMAP4430_CM_ABE_PLL_REF_CLKSEL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x000c)
+#define OMAP4_CM_SYS_CLKSEL_OFFSET			0x0010
 #define OMAP4430_CM_SYS_CLKSEL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0010)
 
 /* PRM.MPU_PRM register offsets */
+#define OMAP4_PM_MPU_PWRSTCTRL_OFFSET			0x0000
 #define OMAP4430_PM_MPU_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0000)
+#define OMAP4_PM_MPU_PWRSTST_OFFSET			0x0004
 #define OMAP4430_PM_MPU_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0004)
+#define OMAP4_RM_MPU_RSTST_OFFSET			0x0014
 #define OMAP4430_RM_MPU_RSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0014)
+#define OMAP4_RM_MPU_MPU_CONTEXT_OFFSET			0x0024
 #define OMAP4430_RM_MPU_MPU_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0024)
 
 /* PRM.TESLA_PRM register offsets */
+#define OMAP4_PM_TESLA_PWRSTCTRL_OFFSET			0x0000
 #define OMAP4430_PM_TESLA_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0000)
+#define OMAP4_PM_TESLA_PWRSTST_OFFSET			0x0004
 #define OMAP4430_PM_TESLA_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0004)
+#define OMAP4_RM_TESLA_RSTCTRL_OFFSET			0x0010
 #define OMAP4430_RM_TESLA_RSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0010)
+#define OMAP4_RM_TESLA_RSTST_OFFSET			0x0014
 #define OMAP4430_RM_TESLA_RSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0014)
+#define OMAP4_RM_TESLA_TESLA_CONTEXT_OFFSET		0x0024
 #define OMAP4430_RM_TESLA_TESLA_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0024)
 
 /* PRM.ABE_PRM register offsets */
+#define OMAP4_PM_ABE_PWRSTCTRL_OFFSET			0x0000
 #define OMAP4430_PM_ABE_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0000)
+#define OMAP4_PM_ABE_PWRSTST_OFFSET			0x0004
 #define OMAP4430_PM_ABE_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0004)
+#define OMAP4_RM_ABE_AESS_CONTEXT_OFFSET		0x002c
 #define OMAP4430_RM_ABE_AESS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x002c)
+#define OMAP4_PM_ABE_PDM_WKDEP_OFFSET			0x0030
 #define OMAP4430_PM_ABE_PDM_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0030)
+#define OMAP4_RM_ABE_PDM_CONTEXT_OFFSET			0x0034
 #define OMAP4430_RM_ABE_PDM_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0034)
+#define OMAP4_PM_ABE_DMIC_WKDEP_OFFSET			0x0038
 #define OMAP4430_PM_ABE_DMIC_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0038)
+#define OMAP4_RM_ABE_DMIC_CONTEXT_OFFSET		0x003c
 #define OMAP4430_RM_ABE_DMIC_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x003c)
+#define OMAP4_PM_ABE_MCASP_WKDEP_OFFSET			0x0040
 #define OMAP4430_PM_ABE_MCASP_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0040)
+#define OMAP4_RM_ABE_MCASP_CONTEXT_OFFSET		0x0044
 #define OMAP4430_RM_ABE_MCASP_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0044)
+#define OMAP4_PM_ABE_MCBSP1_WKDEP_OFFSET		0x0048
 #define OMAP4430_PM_ABE_MCBSP1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0048)
+#define OMAP4_RM_ABE_MCBSP1_CONTEXT_OFFSET		0x004c
 #define OMAP4430_RM_ABE_MCBSP1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x004c)
+#define OMAP4_PM_ABE_MCBSP2_WKDEP_OFFSET		0x0050
 #define OMAP4430_PM_ABE_MCBSP2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0050)
+#define OMAP4_RM_ABE_MCBSP2_CONTEXT_OFFSET		0x0054
 #define OMAP4430_RM_ABE_MCBSP2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0054)
+#define OMAP4_PM_ABE_MCBSP3_WKDEP_OFFSET		0x0058
 #define OMAP4430_PM_ABE_MCBSP3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0058)
+#define OMAP4_RM_ABE_MCBSP3_CONTEXT_OFFSET		0x005c
 #define OMAP4430_RM_ABE_MCBSP3_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x005c)
+#define OMAP4_PM_ABE_SLIMBUS_WKDEP_OFFSET		0x0060
 #define OMAP4430_PM_ABE_SLIMBUS_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0060)
+#define OMAP4_RM_ABE_SLIMBUS_CONTEXT_OFFSET		0x0064
 #define OMAP4430_RM_ABE_SLIMBUS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0064)
+#define OMAP4_PM_ABE_TIMER5_WKDEP_OFFSET		0x0068
 #define OMAP4430_PM_ABE_TIMER5_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0068)
+#define OMAP4_RM_ABE_TIMER5_CONTEXT_OFFSET		0x006c
 #define OMAP4430_RM_ABE_TIMER5_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x006c)
+#define OMAP4_PM_ABE_TIMER6_WKDEP_OFFSET		0x0070
 #define OMAP4430_PM_ABE_TIMER6_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0070)
+#define OMAP4_RM_ABE_TIMER6_CONTEXT_OFFSET		0x0074
 #define OMAP4430_RM_ABE_TIMER6_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0074)
+#define OMAP4_PM_ABE_TIMER7_WKDEP_OFFSET		0x0078
 #define OMAP4430_PM_ABE_TIMER7_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0078)
+#define OMAP4_RM_ABE_TIMER7_CONTEXT_OFFSET		0x007c
 #define OMAP4430_RM_ABE_TIMER7_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x007c)
+#define OMAP4_PM_ABE_TIMER8_WKDEP_OFFSET		0x0080
 #define OMAP4430_PM_ABE_TIMER8_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0080)
+#define OMAP4_RM_ABE_TIMER8_CONTEXT_OFFSET		0x0084
 #define OMAP4430_RM_ABE_TIMER8_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0084)
+#define OMAP4_PM_ABE_WDT3_WKDEP_OFFSET			0x0088
 #define OMAP4430_PM_ABE_WDT3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0088)
+#define OMAP4_RM_ABE_WDT3_CONTEXT_OFFSET		0x008c
 #define OMAP4430_RM_ABE_WDT3_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x008c)
 
 /* PRM.ALWAYS_ON_PRM register offsets */
+#define OMAP4_RM_ALWON_MDMINTC_CONTEXT_OFFSET		0x0024
 #define OMAP4430_RM_ALWON_MDMINTC_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0024)
+#define OMAP4_PM_ALWON_SR_MPU_WKDEP_OFFSET		0x0028
 #define OMAP4430_PM_ALWON_SR_MPU_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0028)
+#define OMAP4_RM_ALWON_SR_MPU_CONTEXT_OFFSET		0x002c
 #define OMAP4430_RM_ALWON_SR_MPU_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x002c)
+#define OMAP4_PM_ALWON_SR_IVA_WKDEP_OFFSET		0x0030
 #define OMAP4430_PM_ALWON_SR_IVA_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0030)
+#define OMAP4_RM_ALWON_SR_IVA_CONTEXT_OFFSET		0x0034
 #define OMAP4430_RM_ALWON_SR_IVA_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0034)
+#define OMAP4_PM_ALWON_SR_CORE_WKDEP_OFFSET		0x0038
 #define OMAP4430_PM_ALWON_SR_CORE_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0038)
+#define OMAP4_RM_ALWON_SR_CORE_CONTEXT_OFFSET		0x003c
 #define OMAP4430_RM_ALWON_SR_CORE_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x003c)
 
 /* PRM.CORE_PRM register offsets */
+#define OMAP4_PM_CORE_PWRSTCTRL_OFFSET			0x0000
 #define OMAP4430_PM_CORE_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0000)
+#define OMAP4_PM_CORE_PWRSTST_OFFSET			0x0004
 #define OMAP4430_PM_CORE_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0004)
+#define OMAP4_RM_L3_1_L3_1_CONTEXT_OFFSET		0x0024
 #define OMAP4430_RM_L3_1_L3_1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0024)
+#define OMAP4_RM_L3_2_L3_2_CONTEXT_OFFSET		0x0124
 #define OMAP4430_RM_L3_2_L3_2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0124)
+#define OMAP4_RM_L3_2_GPMC_CONTEXT_OFFSET		0x012c
 #define OMAP4430_RM_L3_2_GPMC_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x012c)
+#define OMAP4_RM_L3_2_OCMC_RAM_CONTEXT_OFFSET		0x0134
 #define OMAP4430_RM_L3_2_OCMC_RAM_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0134)
+#define OMAP4_RM_DUCATI_RSTCTRL_OFFSET			0x0210
 #define OMAP4430_RM_DUCATI_RSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0210)
+#define OMAP4_RM_DUCATI_RSTST_OFFSET			0x0214
 #define OMAP4430_RM_DUCATI_RSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0214)
+#define OMAP4_RM_DUCATI_DUCATI_CONTEXT_OFFSET		0x0224
 #define OMAP4430_RM_DUCATI_DUCATI_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0224)
+#define OMAP4_RM_SDMA_SDMA_CONTEXT_OFFSET		0x0324
 #define OMAP4430_RM_SDMA_SDMA_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0324)
+#define OMAP4_RM_MEMIF_DMM_CONTEXT_OFFSET		0x0424
 #define OMAP4430_RM_MEMIF_DMM_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0424)
+#define OMAP4_RM_MEMIF_EMIF_FW_CONTEXT_OFFSET		0x042c
 #define OMAP4430_RM_MEMIF_EMIF_FW_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x042c)
+#define OMAP4_RM_MEMIF_EMIF_1_CONTEXT_OFFSET		0x0434
 #define OMAP4430_RM_MEMIF_EMIF_1_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0434)
+#define OMAP4_RM_MEMIF_EMIF_2_CONTEXT_OFFSET		0x043c
 #define OMAP4430_RM_MEMIF_EMIF_2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x043c)
+#define OMAP4_RM_MEMIF_DLL_CONTEXT_OFFSET		0x0444
 #define OMAP4430_RM_MEMIF_DLL_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0444)
+#define OMAP4_RM_MEMIF_EMIF_H1_CONTEXT_OFFSET		0x0454
 #define OMAP4430_RM_MEMIF_EMIF_H1_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0454)
+#define OMAP4_RM_MEMIF_EMIF_H2_CONTEXT_OFFSET		0x045c
 #define OMAP4430_RM_MEMIF_EMIF_H2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x045c)
+#define OMAP4_RM_MEMIF_DLL_H_CONTEXT_OFFSET		0x0464
 #define OMAP4430_RM_MEMIF_DLL_H_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0464)
+#define OMAP4_RM_D2D_SAD2D_CONTEXT_OFFSET		0x0524
 #define OMAP4430_RM_D2D_SAD2D_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0524)
+#define OMAP4_RM_D2D_MODEM_ICR_CONTEXT_OFFSET		0x052c
 #define OMAP4430_RM_D2D_MODEM_ICR_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x052c)
+#define OMAP4_RM_D2D_SAD2D_FW_CONTEXT_OFFSET		0x0534
 #define OMAP4430_RM_D2D_SAD2D_FW_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0534)
+#define OMAP4_RM_L4CFG_L4_CFG_CONTEXT_OFFSET		0x0624
 #define OMAP4430_RM_L4CFG_L4_CFG_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0624)
+#define OMAP4_RM_L4CFG_HW_SEM_CONTEXT_OFFSET		0x062c
 #define OMAP4430_RM_L4CFG_HW_SEM_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x062c)
+#define OMAP4_RM_L4CFG_MAILBOX_CONTEXT_OFFSET		0x0634
 #define OMAP4430_RM_L4CFG_MAILBOX_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0634)
+#define OMAP4_RM_L4CFG_SAR_ROM_CONTEXT_OFFSET		0x063c
 #define OMAP4430_RM_L4CFG_SAR_ROM_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x063c)
+#define OMAP4_RM_L3INSTR_L3_3_CONTEXT_OFFSET		0x0724
 #define OMAP4430_RM_L3INSTR_L3_3_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0724)
+#define OMAP4_RM_L3INSTR_L3_INSTR_CONTEXT_OFFSET	0x072c
 #define OMAP4430_RM_L3INSTR_L3_INSTR_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x072c)
+#define OMAP4_RM_L3INSTR_OCP_WP1_CONTEXT_OFFSET		0x0744
 #define OMAP4430_RM_L3INSTR_OCP_WP1_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0744)
 
 /* PRM.IVAHD_PRM register offsets */
+#define OMAP4_PM_IVAHD_PWRSTCTRL_OFFSET			0x0000
 #define OMAP4430_PM_IVAHD_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0000)
+#define OMAP4_PM_IVAHD_PWRSTST_OFFSET			0x0004
 #define OMAP4430_PM_IVAHD_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0004)
+#define OMAP4_RM_IVAHD_RSTCTRL_OFFSET			0x0010
 #define OMAP4430_RM_IVAHD_RSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0010)
+#define OMAP4_RM_IVAHD_RSTST_OFFSET			0x0014
 #define OMAP4430_RM_IVAHD_RSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0014)
+#define OMAP4_RM_IVAHD_IVAHD_CONTEXT_OFFSET		0x0024
 #define OMAP4430_RM_IVAHD_IVAHD_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0024)
+#define OMAP4_RM_IVAHD_SL2_CONTEXT_OFFSET		0x002c
 #define OMAP4430_RM_IVAHD_SL2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x002c)
 
 /* PRM.CAM_PRM register offsets */
+#define OMAP4_PM_CAM_PWRSTCTRL_OFFSET			0x0000
 #define OMAP4430_PM_CAM_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x0000)
+#define OMAP4_PM_CAM_PWRSTST_OFFSET			0x0004
 #define OMAP4430_PM_CAM_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x0004)
+#define OMAP4_RM_CAM_ISS_CONTEXT_OFFSET			0x0024
 #define OMAP4430_RM_CAM_ISS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x0024)
+#define OMAP4_RM_CAM_FDIF_CONTEXT_OFFSET		0x002c
 #define OMAP4430_RM_CAM_FDIF_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x002c)
 
 /* PRM.DSS_PRM register offsets */
+#define OMAP4_PM_DSS_PWRSTCTRL_OFFSET			0x0000
 #define OMAP4430_PM_DSS_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0000)
+#define OMAP4_PM_DSS_PWRSTST_OFFSET			0x0004
 #define OMAP4430_PM_DSS_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0004)
+#define OMAP4_PM_DSS_DSS_WKDEP_OFFSET			0x0020
 #define OMAP4430_PM_DSS_DSS_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0020)
+#define OMAP4_RM_DSS_DSS_CONTEXT_OFFSET			0x0024
 #define OMAP4430_RM_DSS_DSS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0024)
+#define OMAP4_RM_DSS_DEISS_CONTEXT_OFFSET		0x002c
 #define OMAP4430_RM_DSS_DEISS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x002c)
 
 /* PRM.GFX_PRM register offsets */
+#define OMAP4_PM_GFX_PWRSTCTRL_OFFSET			0x0000
 #define OMAP4430_PM_GFX_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_GFX_MOD, 0x0000)
+#define OMAP4_PM_GFX_PWRSTST_OFFSET			0x0004
 #define OMAP4430_PM_GFX_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_GFX_MOD, 0x0004)
+#define OMAP4_RM_GFX_GFX_CONTEXT_OFFSET			0x0024
 #define OMAP4430_RM_GFX_GFX_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_GFX_MOD, 0x0024)
 
 /* PRM.L3INIT_PRM register offsets */
+#define OMAP4_PM_L3INIT_PWRSTCTRL_OFFSET		0x0000
 #define OMAP4430_PM_L3INIT_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0000)
+#define OMAP4_PM_L3INIT_PWRSTST_OFFSET			0x0004
 #define OMAP4430_PM_L3INIT_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0004)
+#define OMAP4_PM_L3INIT_MMC1_WKDEP_OFFSET		0x0028
 #define OMAP4430_PM_L3INIT_MMC1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0028)
+#define OMAP4_RM_L3INIT_MMC1_CONTEXT_OFFSET		0x002c
 #define OMAP4430_RM_L3INIT_MMC1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x002c)
+#define OMAP4_PM_L3INIT_MMC2_WKDEP_OFFSET		0x0030
 #define OMAP4430_PM_L3INIT_MMC2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0030)
+#define OMAP4_RM_L3INIT_MMC2_CONTEXT_OFFSET		0x0034
 #define OMAP4430_RM_L3INIT_MMC2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0034)
+#define OMAP4_PM_L3INIT_HSI_WKDEP_OFFSET		0x0038
 #define OMAP4430_PM_L3INIT_HSI_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0038)
+#define OMAP4_RM_L3INIT_HSI_CONTEXT_OFFSET		0x003c
 #define OMAP4430_RM_L3INIT_HSI_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x003c)
+#define OMAP4_PM_L3INIT_UNIPRO1_WKDEP_OFFSET		0x0040
 #define OMAP4430_PM_L3INIT_UNIPRO1_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0040)
+#define OMAP4_RM_L3INIT_UNIPRO1_CONTEXT_OFFSET		0x0044
 #define OMAP4430_RM_L3INIT_UNIPRO1_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0044)
+#define OMAP4_PM_L3INIT_USB_HOST_WKDEP_OFFSET		0x0058
 #define OMAP4430_PM_L3INIT_USB_HOST_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0058)
+#define OMAP4_RM_L3INIT_USB_HOST_CONTEXT_OFFSET		0x005c
 #define OMAP4430_RM_L3INIT_USB_HOST_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x005c)
+#define OMAP4_PM_L3INIT_USB_OTG_WKDEP_OFFSET		0x0060
 #define OMAP4430_PM_L3INIT_USB_OTG_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0060)
+#define OMAP4_RM_L3INIT_USB_OTG_CONTEXT_OFFSET		0x0064
 #define OMAP4430_RM_L3INIT_USB_OTG_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0064)
+#define OMAP4_PM_L3INIT_USB_TLL_WKDEP_OFFSET		0x0068
 #define OMAP4430_PM_L3INIT_USB_TLL_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0068)
+#define OMAP4_RM_L3INIT_USB_TLL_CONTEXT_OFFSET		0x006c
 #define OMAP4430_RM_L3INIT_USB_TLL_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x006c)
+#define OMAP4_RM_L3INIT_P1500_CONTEXT_OFFSET		0x007c
 #define OMAP4430_RM_L3INIT_P1500_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x007c)
+#define OMAP4_RM_L3INIT_EMAC_CONTEXT_OFFSET		0x0084
 #define OMAP4430_RM_L3INIT_EMAC_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0084)
+#define OMAP4_PM_L3INIT_SATA_WKDEP_OFFSET		0x0088
 #define OMAP4430_PM_L3INIT_SATA_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0088)
+#define OMAP4_RM_L3INIT_SATA_CONTEXT_OFFSET		0x008c
 #define OMAP4430_RM_L3INIT_SATA_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x008c)
+#define OMAP4_RM_L3INIT_TPPSS_CONTEXT_OFFSET		0x0094
 #define OMAP4430_RM_L3INIT_TPPSS_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0094)
+#define OMAP4_PM_L3INIT_PCIESS_WKDEP_OFFSET		0x0098
 #define OMAP4430_PM_L3INIT_PCIESS_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0098)
+#define OMAP4_RM_L3INIT_PCIESS_CONTEXT_OFFSET		0x009c
 #define OMAP4430_RM_L3INIT_PCIESS_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x009c)
+#define OMAP4_RM_L3INIT_CCPTX_CONTEXT_OFFSET		0x00ac
 #define OMAP4430_RM_L3INIT_CCPTX_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00ac)
+#define OMAP4_PM_L3INIT_XHPI_WKDEP_OFFSET		0x00c0
 #define OMAP4430_PM_L3INIT_XHPI_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00c0)
+#define OMAP4_RM_L3INIT_XHPI_CONTEXT_OFFSET		0x00c4
 #define OMAP4430_RM_L3INIT_XHPI_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00c4)
+#define OMAP4_PM_L3INIT_MMC6_WKDEP_OFFSET		0x00c8
 #define OMAP4430_PM_L3INIT_MMC6_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00c8)
+#define OMAP4_RM_L3INIT_MMC6_CONTEXT_OFFSET		0x00cc
 #define OMAP4430_RM_L3INIT_MMC6_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00cc)
+#define OMAP4_PM_L3INIT_USB_HOST_FS_WKDEP_OFFSET	0x00d0
 #define OMAP4430_PM_L3INIT_USB_HOST_FS_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00d0)
+#define OMAP4_RM_L3INIT_USB_HOST_FS_CONTEXT_OFFSET	0x00d4
 #define OMAP4430_RM_L3INIT_USB_HOST_FS_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00d4)
+#define OMAP4_RM_L3INIT_USBPHYOCP2SCP_CONTEXT_OFFSET	0x00e4
 #define OMAP4430_RM_L3INIT_USBPHYOCP2SCP_CONTEXT	OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00e4)
 
 /* PRM.L4PER_PRM register offsets */
+#define OMAP4_PM_L4PER_PWRSTCTRL_OFFSET			0x0000
 #define OMAP4430_PM_L4PER_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0000)
+#define OMAP4_PM_L4PER_PWRSTST_OFFSET			0x0004
 #define OMAP4430_PM_L4PER_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0004)
+#define OMAP4_RM_L4PER_ADC_CONTEXT_OFFSET		0x0024
 #define OMAP4430_RM_L4PER_ADC_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0024)
+#define OMAP4_PM_L4PER_DMTIMER10_WKDEP_OFFSET		0x0028
 #define OMAP4430_PM_L4PER_DMTIMER10_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0028)
+#define OMAP4_RM_L4PER_DMTIMER10_CONTEXT_OFFSET		0x002c
 #define OMAP4430_RM_L4PER_DMTIMER10_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x002c)
+#define OMAP4_PM_L4PER_DMTIMER11_WKDEP_OFFSET		0x0030
 #define OMAP4430_PM_L4PER_DMTIMER11_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0030)
+#define OMAP4_RM_L4PER_DMTIMER11_CONTEXT_OFFSET		0x0034
 #define OMAP4430_RM_L4PER_DMTIMER11_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0034)
+#define OMAP4_PM_L4PER_DMTIMER2_WKDEP_OFFSET		0x0038
 #define OMAP4430_PM_L4PER_DMTIMER2_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0038)
+#define OMAP4_RM_L4PER_DMTIMER2_CONTEXT_OFFSET		0x003c
 #define OMAP4430_RM_L4PER_DMTIMER2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x003c)
+#define OMAP4_PM_L4PER_DMTIMER3_WKDEP_OFFSET		0x0040
 #define OMAP4430_PM_L4PER_DMTIMER3_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0040)
+#define OMAP4_RM_L4PER_DMTIMER3_CONTEXT_OFFSET		0x0044
 #define OMAP4430_RM_L4PER_DMTIMER3_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0044)
+#define OMAP4_PM_L4PER_DMTIMER4_WKDEP_OFFSET		0x0048
 #define OMAP4430_PM_L4PER_DMTIMER4_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0048)
+#define OMAP4_RM_L4PER_DMTIMER4_CONTEXT_OFFSET		0x004c
 #define OMAP4430_RM_L4PER_DMTIMER4_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x004c)
+#define OMAP4_PM_L4PER_DMTIMER9_WKDEP_OFFSET		0x0050
 #define OMAP4430_PM_L4PER_DMTIMER9_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0050)
+#define OMAP4_RM_L4PER_DMTIMER9_CONTEXT_OFFSET		0x0054
 #define OMAP4430_RM_L4PER_DMTIMER9_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0054)
+#define OMAP4_RM_L4PER_ELM_CONTEXT_OFFSET		0x005c
 #define OMAP4430_RM_L4PER_ELM_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x005c)
+#define OMAP4_PM_L4PER_GPIO2_WKDEP_OFFSET		0x0060
 #define OMAP4430_PM_L4PER_GPIO2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0060)
+#define OMAP4_RM_L4PER_GPIO2_CONTEXT_OFFSET		0x0064
 #define OMAP4430_RM_L4PER_GPIO2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0064)
+#define OMAP4_PM_L4PER_GPIO3_WKDEP_OFFSET		0x0068
 #define OMAP4430_PM_L4PER_GPIO3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0068)
+#define OMAP4_RM_L4PER_GPIO3_CONTEXT_OFFSET		0x006c
 #define OMAP4430_RM_L4PER_GPIO3_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x006c)
+#define OMAP4_PM_L4PER_GPIO4_WKDEP_OFFSET		0x0070
 #define OMAP4430_PM_L4PER_GPIO4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0070)
+#define OMAP4_RM_L4PER_GPIO4_CONTEXT_OFFSET		0x0074
 #define OMAP4430_RM_L4PER_GPIO4_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0074)
+#define OMAP4_PM_L4PER_GPIO5_WKDEP_OFFSET		0x0078
 #define OMAP4430_PM_L4PER_GPIO5_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0078)
+#define OMAP4_RM_L4PER_GPIO5_CONTEXT_OFFSET		0x007c
 #define OMAP4430_RM_L4PER_GPIO5_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x007c)
+#define OMAP4_PM_L4PER_GPIO6_WKDEP_OFFSET		0x0080
 #define OMAP4430_PM_L4PER_GPIO6_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0080)
+#define OMAP4_RM_L4PER_GPIO6_CONTEXT_OFFSET		0x0084
 #define OMAP4430_RM_L4PER_GPIO6_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0084)
+#define OMAP4_RM_L4PER_HDQ1W_CONTEXT_OFFSET		0x008c
 #define OMAP4430_RM_L4PER_HDQ1W_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x008c)
+#define OMAP4_PM_L4PER_HECC1_WKDEP_OFFSET		0x0090
 #define OMAP4430_PM_L4PER_HECC1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0090)
+#define OMAP4_RM_L4PER_HECC1_CONTEXT_OFFSET		0x0094
 #define OMAP4430_RM_L4PER_HECC1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0094)
+#define OMAP4_PM_L4PER_HECC2_WKDEP_OFFSET		0x0098
 #define OMAP4430_PM_L4PER_HECC2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0098)
+#define OMAP4_RM_L4PER_HECC2_CONTEXT_OFFSET		0x009c
 #define OMAP4430_RM_L4PER_HECC2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x009c)
+#define OMAP4_PM_L4PER_I2C1_WKDEP_OFFSET		0x00a0
 #define OMAP4430_PM_L4PER_I2C1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00a0)
+#define OMAP4_RM_L4PER_I2C1_CONTEXT_OFFSET		0x00a4
 #define OMAP4430_RM_L4PER_I2C1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00a4)
+#define OMAP4_PM_L4PER_I2C2_WKDEP_OFFSET		0x00a8
 #define OMAP4430_PM_L4PER_I2C2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00a8)
+#define OMAP4_RM_L4PER_I2C2_CONTEXT_OFFSET		0x00ac
 #define OMAP4430_RM_L4PER_I2C2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00ac)
+#define OMAP4_PM_L4PER_I2C3_WKDEP_OFFSET		0x00b0
 #define OMAP4430_PM_L4PER_I2C3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00b0)
+#define OMAP4_RM_L4PER_I2C3_CONTEXT_OFFSET		0x00b4
 #define OMAP4430_RM_L4PER_I2C3_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00b4)
+#define OMAP4_PM_L4PER_I2C4_WKDEP_OFFSET		0x00b8
 #define OMAP4430_PM_L4PER_I2C4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00b8)
+#define OMAP4_RM_L4PER_I2C4_CONTEXT_OFFSET		0x00bc
 #define OMAP4430_RM_L4PER_I2C4_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00bc)
+#define OMAP4_RM_L4PER_L4_PER_CONTEXT_OFFSET		0x00c0
 #define OMAP4430_RM_L4PER_L4_PER_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00c0)
+#define OMAP4_PM_L4PER_MCASP2_WKDEP_OFFSET		0x00d0
 #define OMAP4430_PM_L4PER_MCASP2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00d0)
+#define OMAP4_RM_L4PER_MCASP2_CONTEXT_OFFSET		0x00d4
 #define OMAP4430_RM_L4PER_MCASP2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00d4)
+#define OMAP4_PM_L4PER_MCASP3_WKDEP_OFFSET		0x00d8
 #define OMAP4430_PM_L4PER_MCASP3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00d8)
+#define OMAP4_RM_L4PER_MCASP3_CONTEXT_OFFSET		0x00dc
 #define OMAP4430_RM_L4PER_MCASP3_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00dc)
+#define OMAP4_PM_L4PER_MCBSP4_WKDEP_OFFSET		0x00e0
 #define OMAP4430_PM_L4PER_MCBSP4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00e0)
+#define OMAP4_RM_L4PER_MCBSP4_CONTEXT_OFFSET		0x00e4
 #define OMAP4430_RM_L4PER_MCBSP4_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00e4)
+#define OMAP4_RM_L4PER_MGATE_CONTEXT_OFFSET		0x00ec
 #define OMAP4430_RM_L4PER_MGATE_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00ec)
+#define OMAP4_PM_L4PER_MCSPI1_WKDEP_OFFSET		0x00f0
 #define OMAP4430_PM_L4PER_MCSPI1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00f0)
+#define OMAP4_RM_L4PER_MCSPI1_CONTEXT_OFFSET		0x00f4
 #define OMAP4430_RM_L4PER_MCSPI1_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00f4)
+#define OMAP4_PM_L4PER_MCSPI2_WKDEP_OFFSET		0x00f8
 #define OMAP4430_PM_L4PER_MCSPI2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00f8)
+#define OMAP4_RM_L4PER_MCSPI2_CONTEXT_OFFSET		0x00fc
 #define OMAP4430_RM_L4PER_MCSPI2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00fc)
+#define OMAP4_PM_L4PER_MCSPI3_WKDEP_OFFSET		0x0100
 #define OMAP4430_PM_L4PER_MCSPI3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0100)
+#define OMAP4_RM_L4PER_MCSPI3_CONTEXT_OFFSET		0x0104
 #define OMAP4430_RM_L4PER_MCSPI3_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0104)
+#define OMAP4_PM_L4PER_MCSPI4_WKDEP_OFFSET		0x0108
 #define OMAP4430_PM_L4PER_MCSPI4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0108)
+#define OMAP4_RM_L4PER_MCSPI4_CONTEXT_OFFSET		0x010c
 #define OMAP4430_RM_L4PER_MCSPI4_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x010c)
+#define OMAP4_PM_L4PER_MMCSD3_WKDEP_OFFSET		0x0120
 #define OMAP4430_PM_L4PER_MMCSD3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0120)
+#define OMAP4_RM_L4PER_MMCSD3_CONTEXT_OFFSET		0x0124
 #define OMAP4430_RM_L4PER_MMCSD3_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0124)
+#define OMAP4_PM_L4PER_MMCSD4_WKDEP_OFFSET		0x0128
 #define OMAP4430_PM_L4PER_MMCSD4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0128)
+#define OMAP4_RM_L4PER_MMCSD4_CONTEXT_OFFSET		0x012c
 #define OMAP4430_RM_L4PER_MMCSD4_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x012c)
+#define OMAP4_RM_L4PER_MSPROHG_CONTEXT_OFFSET		0x0134
 #define OMAP4430_RM_L4PER_MSPROHG_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0134)
+#define OMAP4_PM_L4PER_SLIMBUS2_WKDEP_OFFSET		0x0138
 #define OMAP4430_PM_L4PER_SLIMBUS2_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0138)
+#define OMAP4_RM_L4PER_SLIMBUS2_CONTEXT_OFFSET		0x013c
 #define OMAP4430_RM_L4PER_SLIMBUS2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x013c)
+#define OMAP4_PM_L4PER_UART1_WKDEP_OFFSET		0x0140
 #define OMAP4430_PM_L4PER_UART1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0140)
+#define OMAP4_RM_L4PER_UART1_CONTEXT_OFFSET		0x0144
 #define OMAP4430_RM_L4PER_UART1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0144)
+#define OMAP4_PM_L4PER_UART2_WKDEP_OFFSET		0x0148
 #define OMAP4430_PM_L4PER_UART2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0148)
+#define OMAP4_RM_L4PER_UART2_CONTEXT_OFFSET		0x014c
 #define OMAP4430_RM_L4PER_UART2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x014c)
+#define OMAP4_PM_L4PER_UART3_WKDEP_OFFSET		0x0150
 #define OMAP4430_PM_L4PER_UART3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0150)
+#define OMAP4_RM_L4PER_UART3_CONTEXT_OFFSET		0x0154
 #define OMAP4430_RM_L4PER_UART3_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0154)
+#define OMAP4_PM_L4PER_UART4_WKDEP_OFFSET		0x0158
 #define OMAP4430_PM_L4PER_UART4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0158)
+#define OMAP4_RM_L4PER_UART4_CONTEXT_OFFSET		0x015c
 #define OMAP4430_RM_L4PER_UART4_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x015c)
+#define OMAP4_PM_L4PER_MMCSD5_WKDEP_OFFSET		0x0160
 #define OMAP4430_PM_L4PER_MMCSD5_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0160)
+#define OMAP4_RM_L4PER_MMCSD5_CONTEXT_OFFSET		0x0164
 #define OMAP4430_RM_L4PER_MMCSD5_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0164)
+#define OMAP4_PM_L4PER_I2C5_WKDEP_OFFSET		0x0168
 #define OMAP4430_PM_L4PER_I2C5_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0168)
+#define OMAP4_RM_L4PER_I2C5_CONTEXT_OFFSET		0x016c
 #define OMAP4430_RM_L4PER_I2C5_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x016c)
+#define OMAP4_RM_L4SEC_AES1_CONTEXT_OFFSET		0x01a4
 #define OMAP4430_RM_L4SEC_AES1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01a4)
+#define OMAP4_RM_L4SEC_AES2_CONTEXT_OFFSET		0x01ac
 #define OMAP4430_RM_L4SEC_AES2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01ac)
+#define OMAP4_RM_L4SEC_DES3DES_CONTEXT_OFFSET		0x01b4
 #define OMAP4430_RM_L4SEC_DES3DES_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01b4)
+#define OMAP4_RM_L4SEC_PKAEIP29_CONTEXT_OFFSET		0x01bc
 #define OMAP4430_RM_L4SEC_PKAEIP29_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01bc)
+#define OMAP4_RM_L4SEC_RNG_CONTEXT_OFFSET		0x01c4
 #define OMAP4430_RM_L4SEC_RNG_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01c4)
+#define OMAP4_RM_L4SEC_SHA2MD51_CONTEXT_OFFSET		0x01cc
 #define OMAP4430_RM_L4SEC_SHA2MD51_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01cc)
+#define OMAP4_RM_L4SEC_CRYPTODMA_CONTEXT_OFFSET		0x01dc
 #define OMAP4430_RM_L4SEC_CRYPTODMA_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01dc)
 
 /* PRM.CEFUSE_PRM register offsets */
+#define OMAP4_PM_CEFUSE_PWRSTCTRL_OFFSET		0x0000
 #define OMAP4430_PM_CEFUSE_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CEFUSE_MOD, 0x0000)
+#define OMAP4_PM_CEFUSE_PWRSTST_OFFSET			0x0004
 #define OMAP4430_PM_CEFUSE_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CEFUSE_MOD, 0x0004)
+#define OMAP4_RM_CEFUSE_CEFUSE_CONTEXT_OFFSET		0x0024
 #define OMAP4430_RM_CEFUSE_CEFUSE_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CEFUSE_MOD, 0x0024)
 
 /* PRM.WKUP_PRM register offsets */
+#define OMAP4_RM_WKUP_L4WKUP_CONTEXT_OFFSET		0x0024
 #define OMAP4430_RM_WKUP_L4WKUP_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0024)
+#define OMAP4_RM_WKUP_WDT1_CONTEXT_OFFSET		0x002c
 #define OMAP4430_RM_WKUP_WDT1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x002c)
+#define OMAP4_PM_WKUP_WDT2_WKDEP_OFFSET			0x0030
 #define OMAP4430_PM_WKUP_WDT2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0030)
+#define OMAP4_RM_WKUP_WDT2_CONTEXT_OFFSET		0x0034
 #define OMAP4430_RM_WKUP_WDT2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0034)
+#define OMAP4_PM_WKUP_GPIO1_WKDEP_OFFSET		0x0038
 #define OMAP4430_PM_WKUP_GPIO1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0038)
+#define OMAP4_RM_WKUP_GPIO1_CONTEXT_OFFSET		0x003c
 #define OMAP4430_RM_WKUP_GPIO1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x003c)
+#define OMAP4_PM_WKUP_TIMER1_WKDEP_OFFSET		0x0040
 #define OMAP4430_PM_WKUP_TIMER1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0040)
+#define OMAP4_RM_WKUP_TIMER1_CONTEXT_OFFSET		0x0044
 #define OMAP4430_RM_WKUP_TIMER1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0044)
+#define OMAP4_PM_WKUP_TIMER12_WKDEP_OFFSET		0x0048
 #define OMAP4430_PM_WKUP_TIMER12_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0048)
+#define OMAP4_RM_WKUP_TIMER12_CONTEXT_OFFSET		0x004c
 #define OMAP4430_RM_WKUP_TIMER12_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x004c)
+#define OMAP4_RM_WKUP_SYNCTIMER_CONTEXT_OFFSET		0x0054
 #define OMAP4430_RM_WKUP_SYNCTIMER_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0054)
+#define OMAP4_PM_WKUP_USIM_WKDEP_OFFSET			0x0058
 #define OMAP4430_PM_WKUP_USIM_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0058)
+#define OMAP4_RM_WKUP_USIM_CONTEXT_OFFSET		0x005c
 #define OMAP4430_RM_WKUP_USIM_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x005c)
+#define OMAP4_RM_WKUP_SARRAM_CONTEXT_OFFSET		0x0064
 #define OMAP4430_RM_WKUP_SARRAM_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0064)
+#define OMAP4_PM_WKUP_KEYBOARD_WKDEP_OFFSET		0x0078
 #define OMAP4430_PM_WKUP_KEYBOARD_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0078)
+#define OMAP4_RM_WKUP_KEYBOARD_CONTEXT_OFFSET		0x007c
 #define OMAP4430_RM_WKUP_KEYBOARD_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x007c)
+#define OMAP4_PM_WKUP_RTC_WKDEP_OFFSET			0x0080
 #define OMAP4430_PM_WKUP_RTC_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0080)
+#define OMAP4_RM_WKUP_RTC_CONTEXT_OFFSET		0x0084
 #define OMAP4430_RM_WKUP_RTC_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0084)
 
 /* PRM.WKUP_CM register offsets */
+#define OMAP4_CM_WKUP_CLKSTCTRL_OFFSET			0x0000
 #define OMAP4430_CM_WKUP_CLKSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0000)
+#define OMAP4_CM_WKUP_L4WKUP_CLKCTRL_OFFSET		0x0020
 #define OMAP4430_CM_WKUP_L4WKUP_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0020)
+#define OMAP4_CM_WKUP_WDT1_CLKCTRL_OFFSET		0x0028
 #define OMAP4430_CM_WKUP_WDT1_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0028)
+#define OMAP4_CM_WKUP_WDT2_CLKCTRL_OFFSET		0x0030
 #define OMAP4430_CM_WKUP_WDT2_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0030)
+#define OMAP4_CM_WKUP_GPIO1_CLKCTRL_OFFSET		0x0038
 #define OMAP4430_CM_WKUP_GPIO1_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0038)
+#define OMAP4_CM_WKUP_TIMER1_CLKCTRL_OFFSET		0x0040
 #define OMAP4430_CM_WKUP_TIMER1_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0040)
+#define OMAP4_CM_WKUP_TIMER12_CLKCTRL_OFFSET		0x0048
 #define OMAP4430_CM_WKUP_TIMER12_CLKCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0048)
+#define OMAP4_CM_WKUP_SYNCTIMER_CLKCTRL_OFFSET		0x0050
 #define OMAP4430_CM_WKUP_SYNCTIMER_CLKCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0050)
+#define OMAP4_CM_WKUP_USIM_CLKCTRL_OFFSET		0x0058
 #define OMAP4430_CM_WKUP_USIM_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0058)
+#define OMAP4_CM_WKUP_SARRAM_CLKCTRL_OFFSET		0x0060
 #define OMAP4430_CM_WKUP_SARRAM_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0060)
+#define OMAP4_CM_WKUP_KEYBOARD_CLKCTRL_OFFSET		0x0078
 #define OMAP4430_CM_WKUP_KEYBOARD_CLKCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0078)
+#define OMAP4_CM_WKUP_RTC_CLKCTRL_OFFSET		0x0080
 #define OMAP4430_CM_WKUP_RTC_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0080)
+#define OMAP4_CM_WKUP_BANDGAP_CLKCTRL_OFFSET		0x0088
 #define OMAP4430_CM_WKUP_BANDGAP_CLKCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0088)
 
 /* PRM.EMU_PRM register offsets */
+#define OMAP4_PM_EMU_PWRSTCTRL_OFFSET			0x0000
 #define OMAP4430_PM_EMU_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_MOD, 0x0000)
+#define OMAP4_PM_EMU_PWRSTST_OFFSET			0x0004
 #define OMAP4430_PM_EMU_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_MOD, 0x0004)
+#define OMAP4_RM_EMU_DEBUGSS_CONTEXT_OFFSET		0x0024
 #define OMAP4430_RM_EMU_DEBUGSS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_MOD, 0x0024)
 
 /* PRM.EMU_CM register offsets */
+#define OMAP4_CM_EMU_CLKSTCTRL_OFFSET			0x0000
 #define OMAP4430_CM_EMU_CLKSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_CM_MOD, 0x0000)
+#define OMAP4_CM_EMU_DYNAMICDEP_OFFSET			0x0008
 #define OMAP4430_CM_EMU_DYNAMICDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_CM_MOD, 0x0008)
+#define OMAP4_CM_EMU_DEBUGSS_CLKCTRL_OFFSET		0x0020
 #define OMAP4430_CM_EMU_DEBUGSS_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_CM_MOD, 0x0020)
 
 /* PRM.DEVICE_PRM register offsets */
+#define OMAP4_PRM_RSTCTRL_OFFSET			0x0000
 #define OMAP4430_PRM_RSTCTRL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0000)
+#define OMAP4_PRM_RSTST_OFFSET				0x0004
 #define OMAP4430_PRM_RSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0004)
+#define OMAP4_PRM_RSTTIME_OFFSET			0x0008
 #define OMAP4430_PRM_RSTTIME				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0008)
+#define OMAP4_PRM_CLKREQCTRL_OFFSET			0x000c
 #define OMAP4430_PRM_CLKREQCTRL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x000c)
+#define OMAP4_PRM_VOLTCTRL_OFFSET			0x0010
 #define OMAP4430_PRM_VOLTCTRL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0010)
+#define OMAP4_PRM_PWRREQCTRL_OFFSET			0x0014
 #define OMAP4430_PRM_PWRREQCTRL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0014)
+#define OMAP4_PRM_PSCON_COUNT_OFFSET			0x0018
 #define OMAP4430_PRM_PSCON_COUNT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0018)
+#define OMAP4_PRM_IO_COUNT_OFFSET			0x001c
 #define OMAP4430_PRM_IO_COUNT				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x001c)
+#define OMAP4_PRM_IO_PMCTRL_OFFSET			0x0020
 #define OMAP4430_PRM_IO_PMCTRL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0020)
+#define OMAP4_PRM_VOLTSETUP_WARMRESET_OFFSET		0x0024
 #define OMAP4430_PRM_VOLTSETUP_WARMRESET		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0024)
+#define OMAP4_PRM_VOLTSETUP_CORE_OFF_OFFSET		0x0028
 #define OMAP4430_PRM_VOLTSETUP_CORE_OFF			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0028)
+#define OMAP4_PRM_VOLTSETUP_MPU_OFF_OFFSET		0x002c
 #define OMAP4430_PRM_VOLTSETUP_MPU_OFF			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x002c)
+#define OMAP4_PRM_VOLTSETUP_IVA_OFF_OFFSET		0x0030
 #define OMAP4430_PRM_VOLTSETUP_IVA_OFF			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0030)
+#define OMAP4_PRM_VOLTSETUP_CORE_RET_SLEEP_OFFSET	0x0034
 #define OMAP4430_PRM_VOLTSETUP_CORE_RET_SLEEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0034)
+#define OMAP4_PRM_VOLTSETUP_MPU_RET_SLEEP_OFFSET	0x0038
 #define OMAP4430_PRM_VOLTSETUP_MPU_RET_SLEEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0038)
+#define OMAP4_PRM_VOLTSETUP_IVA_RET_SLEEP_OFFSET	0x003c
 #define OMAP4430_PRM_VOLTSETUP_IVA_RET_SLEEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x003c)
+#define OMAP4_PRM_VP_CORE_CONFIG_OFFSET			0x0040
 #define OMAP4430_PRM_VP_CORE_CONFIG			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0040)
+#define OMAP4_PRM_VP_CORE_STATUS_OFFSET			0x0044
 #define OMAP4430_PRM_VP_CORE_STATUS			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0044)
+#define OMAP4_PRM_VP_CORE_VLIMITTO_OFFSET		0x0048
 #define OMAP4430_PRM_VP_CORE_VLIMITTO			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0048)
+#define OMAP4_PRM_VP_CORE_VOLTAGE_OFFSET		0x004c
 #define OMAP4430_PRM_VP_CORE_VOLTAGE			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x004c)
+#define OMAP4_PRM_VP_CORE_VSTEPMAX_OFFSET		0x0050
 #define OMAP4430_PRM_VP_CORE_VSTEPMAX			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0050)
+#define OMAP4_PRM_VP_CORE_VSTEPMIN_OFFSET		0x0054
 #define OMAP4430_PRM_VP_CORE_VSTEPMIN			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0054)
+#define OMAP4_PRM_VP_MPU_CONFIG_OFFSET			0x0058
 #define OMAP4430_PRM_VP_MPU_CONFIG			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0058)
+#define OMAP4_PRM_VP_MPU_STATUS_OFFSET			0x005c
 #define OMAP4430_PRM_VP_MPU_STATUS			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x005c)
+#define OMAP4_PRM_VP_MPU_VLIMITTO_OFFSET		0x0060
 #define OMAP4430_PRM_VP_MPU_VLIMITTO			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0060)
+#define OMAP4_PRM_VP_MPU_VOLTAGE_OFFSET			0x0064
 #define OMAP4430_PRM_VP_MPU_VOLTAGE			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0064)
+#define OMAP4_PRM_VP_MPU_VSTEPMAX_OFFSET		0x0068
 #define OMAP4430_PRM_VP_MPU_VSTEPMAX			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0068)
+#define OMAP4_PRM_VP_MPU_VSTEPMIN_OFFSET		0x006c
 #define OMAP4430_PRM_VP_MPU_VSTEPMIN			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x006c)
+#define OMAP4_PRM_VP_IVA_CONFIG_OFFSET			0x0070
 #define OMAP4430_PRM_VP_IVA_CONFIG			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0070)
+#define OMAP4_PRM_VP_IVA_STATUS_OFFSET			0x0074
 #define OMAP4430_PRM_VP_IVA_STATUS			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0074)
+#define OMAP4_PRM_VP_IVA_VLIMITTO_OFFSET		0x0078
 #define OMAP4430_PRM_VP_IVA_VLIMITTO			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0078)
+#define OMAP4_PRM_VP_IVA_VOLTAGE_OFFSET			0x007c
 #define OMAP4430_PRM_VP_IVA_VOLTAGE			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x007c)
+#define OMAP4_PRM_VP_IVA_VSTEPMAX_OFFSET		0x0080
 #define OMAP4430_PRM_VP_IVA_VSTEPMAX			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0080)
+#define OMAP4_PRM_VP_IVA_VSTEPMIN_OFFSET		0x0084
 #define OMAP4430_PRM_VP_IVA_VSTEPMIN			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0084)
+#define OMAP4_PRM_VC_SMPS_SA_OFFSET			0x0088
 #define OMAP4430_PRM_VC_SMPS_SA				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0088)
+#define OMAP4_PRM_VC_VAL_SMPS_RA_VOL_OFFSET		0x008c
 #define OMAP4430_PRM_VC_VAL_SMPS_RA_VOL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x008c)
+#define OMAP4_PRM_VC_VAL_SMPS_RA_CMD_OFFSET		0x0090
 #define OMAP4430_PRM_VC_VAL_SMPS_RA_CMD			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0090)
+#define OMAP4_PRM_VC_VAL_CMD_VDD_CORE_L_OFFSET		0x0094
 #define OMAP4430_PRM_VC_VAL_CMD_VDD_CORE_L		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0094)
+#define OMAP4_PRM_VC_VAL_CMD_VDD_MPU_L_OFFSET		0x0098
 #define OMAP4430_PRM_VC_VAL_CMD_VDD_MPU_L		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0098)
+#define OMAP4_PRM_VC_VAL_CMD_VDD_IVA_L_OFFSET		0x009c
 #define OMAP4430_PRM_VC_VAL_CMD_VDD_IVA_L		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x009c)
+#define OMAP4_PRM_VC_VAL_BYPASS_OFFSET			0x00a0
 #define OMAP4430_PRM_VC_VAL_BYPASS			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00a0)
+#define OMAP4_PRM_VC_CFG_CHANNEL_OFFSET			0x00a4
 #define OMAP4430_PRM_VC_CFG_CHANNEL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00a4)
+#define OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET		0x00a8
 #define OMAP4430_PRM_VC_CFG_I2C_MODE			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00a8)
+#define OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET			0x00ac
 #define OMAP4430_PRM_VC_CFG_I2C_CLK			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00ac)
+#define OMAP4_PRM_SRAM_COUNT_OFFSET			0x00b0
 #define OMAP4430_PRM_SRAM_COUNT				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00b0)
+#define OMAP4_PRM_SRAM_WKUP_SETUP_OFFSET		0x00b4
 #define OMAP4430_PRM_SRAM_WKUP_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00b4)
+#define OMAP4_PRM_LDO_SRAM_CORE_SETUP_OFFSET		0x00b8
 #define OMAP4430_PRM_LDO_SRAM_CORE_SETUP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00b8)
+#define OMAP4_PRM_LDO_SRAM_CORE_CTRL_OFFSET		0x00bc
 #define OMAP4430_PRM_LDO_SRAM_CORE_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00bc)
+#define OMAP4_PRM_LDO_SRAM_MPU_SETUP_OFFSET		0x00c0
 #define OMAP4430_PRM_LDO_SRAM_MPU_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00c0)
+#define OMAP4_PRM_LDO_SRAM_MPU_CTRL_OFFSET		0x00c4
 #define OMAP4430_PRM_LDO_SRAM_MPU_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00c4)
+#define OMAP4_PRM_LDO_SRAM_IVA_SETUP_OFFSET		0x00c8
 #define OMAP4430_PRM_LDO_SRAM_IVA_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00c8)
+#define OMAP4_PRM_LDO_SRAM_IVA_CTRL_OFFSET		0x00cc
 #define OMAP4430_PRM_LDO_SRAM_IVA_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00cc)
+#define OMAP4_PRM_LDO_ABB_MPU_SETUP_OFFSET		0x00d0
 #define OMAP4430_PRM_LDO_ABB_MPU_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00d0)
+#define OMAP4_PRM_LDO_ABB_MPU_CTRL_OFFSET		0x00d4
 #define OMAP4430_PRM_LDO_ABB_MPU_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00d4)
+#define OMAP4_PRM_LDO_ABB_IVA_SETUP_OFFSET		0x00d8
 #define OMAP4430_PRM_LDO_ABB_IVA_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00d8)
+#define OMAP4_PRM_LDO_ABB_IVA_CTRL_OFFSET		0x00dc
 #define OMAP4430_PRM_LDO_ABB_IVA_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00dc)
+#define OMAP4_PRM_LDO_BANDGAP_CTRL_OFFSET		0x00e0
 #define OMAP4430_PRM_LDO_BANDGAP_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00e0)
+#define OMAP4_PRM_DEVICE_OFF_CTRL_OFFSET		0x00e4
 #define OMAP4430_PRM_DEVICE_OFF_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00e4)
+#define OMAP4_PRM_PHASE1_CNDP_OFFSET			0x00e8
 #define OMAP4430_PRM_PHASE1_CNDP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00e8)
+#define OMAP4_PRM_PHASE2A_CNDP_OFFSET			0x00ec
 #define OMAP4430_PRM_PHASE2A_CNDP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00ec)
+#define OMAP4_PRM_PHASE2B_CNDP_OFFSET			0x00f0
 #define OMAP4430_PRM_PHASE2B_CNDP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00f0)
+#define OMAP4_PRM_MODEM_IF_CTRL_OFFSET			0x00f4
 #define OMAP4430_PRM_MODEM_IF_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00f4)
 
-/* CHIRON_PRCM */
+/*
+ * PRCM_MPU
+ *
+ * The PRCM_MPU is a local PRCM inside the MPU subsystem. From the global
+ * PRCM's point of view, the PRCM_MPU is a single entity. It shares the same
+ * programming model as the global PRCM and can thus be treated as two new
+ * MODs inside the PRCM.
+ */
 
+/* PRCM_MPU.OCP_SOCKET_PRCM register offsets */
+#define OMAP4_REVISION_PRCM_OFFSET			0x0000
+#define OMAP4430_REVISION_PRCM				OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_OCP_SOCKET_PRCM_MOD, 0x0000)
 
-/* CHIRON_PRCM.CHIRONSS_OCP_SOCKET_PRCM register offsets */
-#define OMAP4430_REVISION_PRCM				OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_OCP_SOCKET_PRCM_MOD, 0x0000)
+/* PRCM_MPU.DEVICE_PRM register offsets */
+#define OMAP4_PRCM_MPU_PRM_RSTST_OFFSET			0x0000
+#define OMAP4430_PRCM_MPU_PRM_RSTST			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_MOD, 0x0000)
 
-/* CHIRON_PRCM.CHIRONSS_DEVICE_PRM register offsets */
-#define OMAP4430_CHIRON_PRCM_PRM_RSTST			OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_DEVICE_PRM_MOD, 0x0000)
+/* PRCM_MPU.CPU0 register offsets */
+#define OMAP4_PM_CPU0_PWRSTCTRL_OFFSET			0x0000
+#define OMAP4430_PM_CPU0_PWRSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0000)
+#define OMAP4_PM_CPU0_PWRSTST_OFFSET			0x0004
+#define OMAP4430_PM_CPU0_PWRSTST			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0004)
+#define OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET		0x0008
+#define OMAP4430_RM_CPU0_CPU0_CONTEXT			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0008)
+#define OMAP4_RM_CPU0_CPU0_RSTCTRL_OFFSET		0x000c
+#define OMAP4430_RM_CPU0_CPU0_RSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x000c)
+#define OMAP4_RM_CPU0_CPU0_RSTST_OFFSET			0x0010
+#define OMAP4430_RM_CPU0_CPU0_RSTST			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0010)
+#define OMAP4_CM_CPU0_CPU0_CLKCTRL_OFFSET		0x0014
+#define OMAP4430_CM_CPU0_CPU0_CLKCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0014)
+#define OMAP4_CM_CPU0_CLKSTCTRL_OFFSET			0x0018
+#define OMAP4430_CM_CPU0_CLKSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0018)
 
-/* CHIRON_PRCM.CHIRONSS_CPU0 register offsets */
-#define OMAP4430_PM_PDA_CPU0_PWRSTCTRL			OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0000)
-#define OMAP4430_PM_PDA_CPU0_PWRSTST			OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0004)
-#define OMAP4430_RM_PDA_CPU0_CPU0_CONTEXT		OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0008)
-#define OMAP4430_RM_PDA_CPU0_CPU0_RSTCTRL		OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x000c)
-#define OMAP4430_RM_PDA_CPU0_CPU0_RSTST			OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0010)
-#define OMAP4430_CM_PDA_CPU0_CPU0_CLKCTRL		OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0014)
-#define OMAP4430_CM_PDA_CPU0_CLKSTCTRL			OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0018)
-
-/* CHIRON_PRCM.CHIRONSS_CPU1 register offsets */
-#define OMAP4430_PM_PDA_CPU1_PWRSTCTRL			OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0000)
-#define OMAP4430_PM_PDA_CPU1_PWRSTST			OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0004)
-#define OMAP4430_RM_PDA_CPU1_CPU1_CONTEXT		OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0008)
-#define OMAP4430_RM_PDA_CPU1_CPU1_RSTCTRL		OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x000c)
-#define OMAP4430_RM_PDA_CPU1_CPU1_RSTST			OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0010)
-#define OMAP4430_CM_PDA_CPU1_CPU1_CLKCTRL		OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0014)
-#define OMAP4430_CM_PDA_CPU1_CLKSTCTRL			OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0018)
+/* PRCM_MPU.CPU1 register offsets */
+#define OMAP4_PM_CPU1_PWRSTCTRL_OFFSET			0x0000
+#define OMAP4430_PM_CPU1_PWRSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0000)
+#define OMAP4_PM_CPU1_PWRSTST_OFFSET			0x0004
+#define OMAP4430_PM_CPU1_PWRSTST			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0004)
+#define OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET		0x0008
+#define OMAP4430_RM_CPU1_CPU1_CONTEXT			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0008)
+#define OMAP4_RM_CPU1_CPU1_RSTCTRL_OFFSET		0x000c
+#define OMAP4430_RM_CPU1_CPU1_RSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x000c)
+#define OMAP4_RM_CPU1_CPU1_RSTST_OFFSET			0x0010
+#define OMAP4430_RM_CPU1_CPU1_RSTST			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0010)
+#define OMAP4_CM_CPU1_CPU1_CLKCTRL_OFFSET		0x0014
+#define OMAP4430_CM_CPU1_CPU1_CLKCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0014)
+#define OMAP4_CM_CPU1_CLKSTCTRL_OFFSET			0x0018
+#define OMAP4430_CM_CPU1_CLKSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0018)
 #endif
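
Editorial aside (illustration only, not part of the merged patch): the header
above pairs each absolute register address macro (OMAP4430_*) with a
per-module offset macro (OMAP4_*_OFFSET), so a PRM register can also be
computed as PRM base + module offset + register offset. A minimal sketch of
that usage follows; the helper name and the already-mapped prm_base pointer
are hypothetical and used purely for illustration.

#include <linux/io.h>
#include <linux/types.h>

/*
 * Illustrative only: read a PRM register from its module offset and
 * register offset instead of through a precomputed absolute address.
 */
static inline u32 prm_read_mod_reg_sketch(void __iomem *prm_base,
					  u16 mod, u16 reg)
{
	return __raw_readl(prm_base + mod + reg);
}

/*
 * Example: the ABE power-state status register would then be read as
 *	prm_read_mod_reg_sketch(prm_base, OMAP4430_PRM_ABE_MOD,
 *				OMAP4_PM_ABE_PWRSTST_OFFSET);
 */
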
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 6da796e..78b49a6 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -110,8 +110,13 @@
 	tristate
 
 config OMAP_IOMMU_DEBUG
-	depends on OMAP_IOMMU
-	tristate
+	tristate "Export OMAP IOMMU internals in DebugFS"
+	depends on OMAP_IOMMU && DEBUG_FS
+	help
+	  Select this to see extensive information about
+	  the internal state of OMAP IOMMU in debugfs.
+
+	  Say N unless you know you need this.
 
 choice
 	prompt "System timer"
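
Editorial aside (illustration only, not part of the patch): with the new
"depends on OMAP_IOMMU && DEBUG_FS" line the option is only offered when
debugfs support is enabled. A hypothetical .config fragment exercising it
might look like:

	CONFIG_DEBUG_FS=y
	CONFIG_OMAP_IOMMU=m
	CONFIG_OMAP_IOMMU_DEBUG=m
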
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 5261a09..7190cbd 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -12,14 +12,12 @@
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/module.h>
 #include <linux/list.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/string.h>
 #include <linux/clk.h>
 #include <linux/mutex.h>
-#include <linux/platform_device.h>
 #include <linux/cpufreq.h>
 #include <linux/debugfs.h>
 #include <linux/io.h>
@@ -32,9 +30,9 @@
 
 static struct clk_functions *arch_clock;
 
-/*-------------------------------------------------------------------------
+/*
  * Standard clock functions defined in include/linux/clk.h
- *-------------------------------------------------------------------------*/
+ */
 
 int clk_enable(struct clk *clk)
 {
@@ -92,9 +90,9 @@
 }
 EXPORT_SYMBOL(clk_get_rate);
 
-/*-------------------------------------------------------------------------
+/*
  * Optional clock functions defined in include/linux/clk.h
- *-------------------------------------------------------------------------*/
+ */
 
 long clk_round_rate(struct clk *clk, unsigned long rate)
 {
@@ -140,9 +138,6 @@
 	unsigned long flags;
 	int ret = -EINVAL;
 
-	if (cpu_is_omap44xx())
-	/* OMAP4 clk framework not supported yet */
-		return 0;
 	if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
 		return ret;
 
@@ -169,9 +164,9 @@
 }
 EXPORT_SYMBOL(clk_get_parent);
 
-/*-------------------------------------------------------------------------
+/*
  * OMAP specific clock functions shared between omap1 and omap2
- *-------------------------------------------------------------------------*/
+ */
 
 int __initdata mpurate;
 
@@ -222,7 +217,7 @@
 }
 
 /* Propagate rate to children */
-void propagate_rate(struct clk * tclk)
+void propagate_rate(struct clk *tclk)
 {
 	struct clk *clkp;
 
@@ -389,7 +384,9 @@
 }
 #endif
 
-/*-------------------------------------------------------------------------*/
+/*
+ *
+ */
 
 #ifdef CONFIG_OMAP_RESET_CLOCKS
 /*
@@ -404,7 +401,7 @@
 		if (ck->ops == &clkops_null)
 			continue;
 
-		if (ck->usecount > 0 || ck->enable_reg == 0)
+		if (ck->usecount > 0 || !ck->enable_reg)
 			continue;
 
 		spin_lock_irqsave(&clockfw_lock, flags);
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index f12f0e3..219c01e 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -47,9 +47,6 @@
 struct omap_board_config_kernel *omap_board_config;
 int omap_board_config_size;
 
-/* used by omap-smp.c and board-4430sdp.c */
-void __iomem *gic_cpu_base_addr;
-
 static const void *get_config(u16 tag, size_t len, int skip, size_t *len_out)
 {
 	struct omap_board_config_kernel *kinfo = NULL;
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 1d95996..f7f571e 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -501,7 +501,8 @@
 			burst = 0x2;
 			break;
 		}
-		/* not supported by current hardware on OMAP1
+		/*
+		 * not supported by current hardware on OMAP1
 		 * w |= (0x03 << 7);
 		 * fall through
 		 */
@@ -510,7 +511,8 @@
 			burst = 0x3;
 			break;
 		}
-		/* OMAP1 don't support burst 16
+		/*
+		 * OMAP1 doesn't support burst 16
 		 * fall through
 		 */
 	default:
@@ -604,7 +606,8 @@
 			burst = 0x3;
 			break;
 		}
-		/* OMAP1 don't support burst 16
+		/*
+		 * OMAP1 doesn't support burst 16
 		 * fall through
 		 */
 	default:
@@ -709,6 +712,21 @@
 	spin_unlock_irqrestore(&dma_chan_lock, flags);
 }
 
+static inline void omap2_disable_irq_lch(int lch)
+{
+	u32 val;
+	unsigned long flags;
+
+	if (!cpu_class_is_omap2())
+		return;
+
+	spin_lock_irqsave(&dma_chan_lock, flags);
+	val = dma_read(IRQENABLE_L0);
+	val &= ~(1 << lch);
+	dma_write(val, IRQENABLE_L0);
+	spin_unlock_irqrestore(&dma_chan_lock, flags);
+}
+
 int omap_request_dma(int dev_id, const char *dev_name,
 		     void (*callback)(int lch, u16 ch_status, void *data),
 		     void *data, int *dma_ch_out)
@@ -807,14 +825,7 @@
 	}
 
 	if (cpu_class_is_omap2()) {
-		u32 val;
-
-		spin_lock_irqsave(&dma_chan_lock, flags);
-		/* Disable interrupts */
-		val = dma_read(IRQENABLE_L0);
-		val &= ~(1 << lch);
-		dma_write(val, IRQENABLE_L0);
-		spin_unlock_irqrestore(&dma_chan_lock, flags);
+		omap2_disable_irq_lch(lch);
 
 		/* Clear the CSR register and IRQ status register */
 		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
@@ -1277,8 +1288,10 @@
 		return -EINVAL;
 	}
 
-	/* Allocate a queue to maintain the status of the channels
-	 * in the chain */
+	/*
+	 * Allocate a queue to maintain the status of the channels
+	 * in the chain
+	 */
 	channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
 	if (channels == NULL) {
 		printk(KERN_ERR "omap_dma: No memory for channel queue\n");
@@ -1907,7 +1920,8 @@
 		printk(KERN_INFO "DMA transaction error with device %d\n",
 		       dma_chan[ch].dev_id);
 		if (cpu_class_is_omap2()) {
-			/* Errata: sDMA Channel is not disabled
+			/*
+			 * Errata: sDMA Channel is not disabled
 			 * after a transaction error. So we explicitely
 			 * disable the channel
 			 */
@@ -2107,6 +2121,9 @@
 
 	for (ch = 0; ch < dma_chan_count; ch++) {
 		omap_clear_dma(ch);
+		if (cpu_class_is_omap2())
+			omap2_disable_irq_lch(ch);
+
 		dma_chan[ch].dev_id = -1;
 		dma_chan[ch].next_lch = -1;
 
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 4d99dfbc..c64875f 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -264,8 +264,8 @@
 	{ .phys_base = 0x4a320000, .irq = OMAP44XX_IRQ_GPT12 },
 };
 static const char *omap4_dm_source_names[] __initdata = {
-	"sys_ck",
-	"omap_32k_fck",
+	"sys_clkin_ck",
+	"sys_32k_ck",
 	NULL
 };
 static struct clk *omap4_dm_source_clocks[2];
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 45a225d..dc2ac42 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -27,6 +27,7 @@
 #include <mach/irqs.h>
 #include <mach/gpio.h>
 #include <asm/mach/irq.h>
+#include <plat/powerdomain.h>
 
 /*
  * OMAP1510 GPIO registers
@@ -137,7 +138,11 @@
 #define OMAP4_GPIO_IRQSTATUSCLR1	0x0040
 #define OMAP4_GPIO_IRQWAKEN0		0x0044
 #define OMAP4_GPIO_IRQWAKEN1		0x0048
-#define OMAP4_GPIO_SYSSTATUS		0x0104
+#define OMAP4_GPIO_SYSSTATUS		0x0114
+#define OMAP4_GPIO_IRQENABLE1		0x011c
+#define OMAP4_GPIO_WAKE_EN		0x0120
+#define OMAP4_GPIO_IRQSTATUS2		0x0128
+#define OMAP4_GPIO_IRQENABLE2		0x012c
 #define OMAP4_GPIO_CTRL			0x0130
 #define OMAP4_GPIO_OE			0x0134
 #define OMAP4_GPIO_DATAIN		0x0138
@@ -148,6 +153,10 @@
 #define OMAP4_GPIO_FALLINGDETECT	0x014c
 #define OMAP4_GPIO_DEBOUNCENABLE	0x0150
 #define OMAP4_GPIO_DEBOUNCINGTIME	0x0154
+#define OMAP4_GPIO_CLEARIRQENABLE1	0x0160
+#define OMAP4_GPIO_SETIRQENABLE1	0x0164
+#define OMAP4_GPIO_CLEARWKUENA		0x0180
+#define OMAP4_GPIO_SETWKUENA		0x0184
 #define OMAP4_GPIO_CLEARDATAOUT		0x0190
 #define OMAP4_GPIO_SETDATAOUT		0x0194
 /*
@@ -195,6 +204,7 @@
 	struct gpio_chip chip;
 	struct clk *dbck;
 	u32 mod_usage;
+	u32 dbck_enable_mask;
 };
 
 #define METHOD_MPUIO		0
@@ -303,8 +313,6 @@
 	u32 risingdetect;
 	u32 fallingdetect;
 	u32 dataout;
-	u32 setwkuena;
-	u32 setdataout;
 };
 
 static struct omap3_gpio_regs gpio_context[OMAP34XX_NR_GPIOS];
@@ -591,12 +599,16 @@
 		reg += OMAP7XX_GPIO_DATA_OUTPUT;
 		break;
 #endif
-#ifdef CONFIG_ARCH_OMAP2PLUS
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
 	case METHOD_GPIO_24XX:
-	case METHOD_GPIO_44XX:
 		reg += OMAP24XX_GPIO_DATAOUT;
 		break;
 #endif
+#ifdef CONFIG_ARCH_OMAP4
+	case METHOD_GPIO_44XX:
+		reg += OMAP4_GPIO_DATAOUT;
+		break;
+#endif
 	default:
 		return -EINVAL;
 	}
@@ -646,6 +658,7 @@
 		goto done;
 
 	if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
+		bank->dbck_enable_mask = val;
 		if (enable)
 			clk_enable(bank->dbck);
 		else
@@ -724,15 +737,27 @@
 							 OMAP4_GPIO_IRQWAKEN0);
 			}
 		} else {
-			if (trigger != 0)
+			/*
+			 * GPIO wakeup request can only be generated on edge
+			 * transitions
+			 */
+			if (trigger & IRQ_TYPE_EDGE_BOTH)
 				__raw_writel(1 << gpio, bank->base
 					+ OMAP24XX_GPIO_SETWKUENA);
 			else
 				__raw_writel(1 << gpio, bank->base
 					+ OMAP24XX_GPIO_CLEARWKUENA);
 		}
-	} else {
-		if (trigger != 0)
+	}
+	/* This part must always be executed for OMAP34xx */
+	if (cpu_is_omap34xx() || (bank->non_wakeup_gpios & gpio_bit)) {
+		/*
+		 * Log the edge-triggered GPIO and manually trigger the IRQ
+		 * after resume if the input level changes,
+		 * to avoid losing IRQs during PER RET/OFF mode.
+		 * Applies to OMAP2 non-wakeup GPIOs and all OMAP3 GPIOs.
+		 */
+		if (trigger & IRQ_TYPE_EDGE_BOTH)
 			bank->enabled_non_wakeup_gpios |= gpio_bit;
 		else
 			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
@@ -1200,11 +1225,17 @@
 #endif
 	if (!cpu_class_is_omap1()) {
 		if (!bank->mod_usage) {
+			void __iomem *reg = bank->base;
 			u32 ctrl;
-			ctrl = __raw_readl(bank->base + OMAP24XX_GPIO_CTRL);
-			ctrl &= 0xFFFFFFFE;
+
+			if (cpu_is_omap24xx() || cpu_is_omap34xx())
+				reg += OMAP24XX_GPIO_CTRL;
+			else if (cpu_is_omap44xx())
+				reg += OMAP4_GPIO_CTRL;
+			ctrl = __raw_readl(reg);
 			/* Module is enabled, clocks are not gated */
-			__raw_writel(ctrl, bank->base + OMAP24XX_GPIO_CTRL);
+			ctrl &= 0xFFFFFFFE;
+			__raw_writel(ctrl, reg);
 		}
 		bank->mod_usage |= 1 << offset;
 	}
@@ -1226,22 +1257,34 @@
 		__raw_writel(1 << offset, reg);
 	}
 #endif
-#ifdef CONFIG_ARCH_OMAP2PLUS
-	if ((bank->method == METHOD_GPIO_24XX) ||
-			(bank->method == METHOD_GPIO_44XX)) {
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+	if (bank->method == METHOD_GPIO_24XX) {
 		/* Disable wake-up during idle for dynamic tick */
 		void __iomem *reg = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
 		__raw_writel(1 << offset, reg);
 	}
 #endif
+#ifdef CONFIG_ARCH_OMAP4
+	if (bank->method == METHOD_GPIO_44XX) {
+		/* Disable wake-up during idle for dynamic tick */
+		void __iomem *reg = bank->base + OMAP4_GPIO_IRQWAKEN0;
+		__raw_writel(1 << offset, reg);
+	}
+#endif
 	if (!cpu_class_is_omap1()) {
 		bank->mod_usage &= ~(1 << offset);
 		if (!bank->mod_usage) {
+			void __iomem *reg = bank->base;
 			u32 ctrl;
-			ctrl = __raw_readl(bank->base + OMAP24XX_GPIO_CTRL);
+
+			if (cpu_is_omap24xx() || cpu_is_omap34xx())
+				reg += OMAP24XX_GPIO_CTRL;
+			else if (cpu_is_omap44xx())
+				reg += OMAP4_GPIO_CTRL;
+			ctrl = __raw_readl(reg);
 			/* Module is disabled, clocks are gated */
 			ctrl |= 1;
-			__raw_writel(ctrl, bank->base + OMAP24XX_GPIO_CTRL);
+			__raw_writel(ctrl, reg);
 		}
 	}
 	_reset_gpio(bank, bank->chip.base + offset);
@@ -1570,9 +1613,14 @@
 		reg += OMAP7XX_GPIO_DIR_CONTROL;
 		break;
 	case METHOD_GPIO_24XX:
-	case METHOD_GPIO_44XX:
 		reg += OMAP24XX_GPIO_OE;
 		break;
+	case METHOD_GPIO_44XX:
+		reg += OMAP4_GPIO_OE;
+		break;
+	default:
+		WARN_ONCE(1, "gpio_is_input: incorrect OMAP GPIO method");
+		return -EINVAL;
 	}
 	return __raw_readl(reg) & mask;
 }
@@ -1845,7 +1893,8 @@
 				__raw_writel(0, bank->base +
 						OMAP24XX_GPIO_CTRL);
 			}
-			if (i < ARRAY_SIZE(non_wakeup_gpios))
+			if (cpu_is_omap24xx() &&
+			    i < ARRAY_SIZE(non_wakeup_gpios))
 				bank->non_wakeup_gpios = non_wakeup_gpios[i];
 			gpio_count = 32;
 		}
@@ -2028,16 +2077,27 @@
 
 static int workaround_enabled;
 
-void omap2_gpio_prepare_for_retention(void)
+void omap2_gpio_prepare_for_idle(int power_state)
 {
 	int i, c = 0;
+	int min = 0;
 
-	/* Remove triggering for all non-wakeup GPIOs.  Otherwise spurious
-	 * IRQs will be generated.  See OMAP2420 Errata item 1.101. */
-	for (i = 0; i < gpio_bank_count; i++) {
+	if (cpu_is_omap34xx())
+		min = 1;
+
+	for (i = min; i < gpio_bank_count; i++) {
 		struct gpio_bank *bank = &gpio_bank[i];
 		u32 l1, l2;
 
+		if (bank->dbck_enable_mask)
+			clk_disable(bank->dbck);
+
+		if (power_state > PWRDM_POWER_OFF)
+			continue;
+
+		/* If going to OFF, remove triggering for all
+		 * non-wakeup GPIOs.  Otherwise spurious IRQs will be
+		 * generated.  See OMAP2420 Errata item 1.101. */
 		if (!(bank->enabled_non_wakeup_gpios))
 			continue;
 
@@ -2085,16 +2145,23 @@
 	workaround_enabled = 1;
 }
 
-void omap2_gpio_resume_after_retention(void)
+void omap2_gpio_resume_after_idle(void)
 {
 	int i;
+	int min = 0;
 
-	if (!workaround_enabled)
-		return;
-	for (i = 0; i < gpio_bank_count; i++) {
+	if (cpu_is_omap34xx())
+		min = 1;
+	for (i = min; i < gpio_bank_count; i++) {
 		struct gpio_bank *bank = &gpio_bank[i];
 		u32 l, gen, gen0, gen1;
 
+		if (bank->dbck_enable_mask)
+			clk_enable(bank->dbck);
+
+		if (!workaround_enabled)
+			continue;
+
 		if (!(bank->enabled_non_wakeup_gpios))
 			continue;
 
@@ -2119,7 +2186,7 @@
 		 * horribly racy, but it's the best we can do to work around
 		 * this silicon bug. */
 		l ^= bank->saved_datain;
-		l &= bank->non_wakeup_gpios;
+		l &= bank->enabled_non_wakeup_gpios;
 
 		/*
 		 * No need to generate IRQs for the rising edge for gpio IRQs
@@ -2207,10 +2274,6 @@
 			__raw_readl(bank->base + OMAP24XX_GPIO_FALLINGDETECT);
 		gpio_context[i].dataout =
 			__raw_readl(bank->base + OMAP24XX_GPIO_DATAOUT);
-		gpio_context[i].setwkuena =
-			__raw_readl(bank->base + OMAP24XX_GPIO_SETWKUENA);
-		gpio_context[i].setdataout =
-			__raw_readl(bank->base + OMAP24XX_GPIO_SETDATAOUT);
 	}
 }
 
@@ -2243,10 +2306,6 @@
 				bank->base + OMAP24XX_GPIO_FALLINGDETECT);
 		__raw_writel(gpio_context[i].dataout,
 				bank->base + OMAP24XX_GPIO_DATAOUT);
-		__raw_writel(gpio_context[i].setwkuena,
-				bank->base + OMAP24XX_GPIO_SETWKUENA);
-		__raw_writel(gpio_context[i].setdataout,
-				bank->base + OMAP24XX_GPIO_SETDATAOUT);
 	}
 }
 #endif
@@ -2286,110 +2345,3 @@
 }
 
 arch_initcall(omap_gpio_sysinit);
-
-
-#ifdef	CONFIG_DEBUG_FS
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-static int dbg_gpio_show(struct seq_file *s, void *unused)
-{
-	unsigned	i, j, gpio;
-
-	for (i = 0, gpio = 0; i < gpio_bank_count; i++) {
-		struct gpio_bank	*bank = gpio_bank + i;
-		unsigned		bankwidth = 16;
-		u32			mask = 1;
-
-		if (bank_is_mpuio(bank))
-			gpio = OMAP_MPUIO(0);
-		else if (cpu_class_is_omap2() || cpu_is_omap7xx())
-			bankwidth = 32;
-
-		for (j = 0; j < bankwidth; j++, gpio++, mask <<= 1) {
-			unsigned	irq, value, is_in, irqstat;
-			const char	*label;
-
-			label = gpiochip_is_requested(&bank->chip, j);
-			if (!label)
-				continue;
-
-			irq = bank->virtual_irq_start + j;
-			value = gpio_get_value(gpio);
-			is_in = gpio_is_input(bank, mask);
-
-			if (bank_is_mpuio(bank))
-				seq_printf(s, "MPUIO %2d ", j);
-			else
-				seq_printf(s, "GPIO %3d ", gpio);
-			seq_printf(s, "(%-20.20s): %s %s",
-					label,
-					is_in ? "in " : "out",
-					value ? "hi"  : "lo");
-
-/* FIXME for at least omap2, show pullup/pulldown state */
-
-			irqstat = irq_desc[irq].status;
-#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
-			if (is_in && ((bank->suspend_wakeup & mask)
-					|| irqstat & IRQ_TYPE_SENSE_MASK)) {
-				char	*trigger = NULL;
-
-				switch (irqstat & IRQ_TYPE_SENSE_MASK) {
-				case IRQ_TYPE_EDGE_FALLING:
-					trigger = "falling";
-					break;
-				case IRQ_TYPE_EDGE_RISING:
-					trigger = "rising";
-					break;
-				case IRQ_TYPE_EDGE_BOTH:
-					trigger = "bothedge";
-					break;
-				case IRQ_TYPE_LEVEL_LOW:
-					trigger = "low";
-					break;
-				case IRQ_TYPE_LEVEL_HIGH:
-					trigger = "high";
-					break;
-				case IRQ_TYPE_NONE:
-					trigger = "(?)";
-					break;
-				}
-				seq_printf(s, ", irq-%d %-8s%s",
-						irq, trigger,
-						(bank->suspend_wakeup & mask)
-							? " wakeup" : "");
-			}
-#endif
-			seq_printf(s, "\n");
-		}
-
-		if (bank_is_mpuio(bank)) {
-			seq_printf(s, "\n");
-			gpio = 0;
-		}
-	}
-	return 0;
-}
-
-static int dbg_gpio_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, dbg_gpio_show, &inode->i_private);
-}
-
-static const struct file_operations debug_fops = {
-	.open		= dbg_gpio_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int __init omap_gpio_debuginit(void)
-{
-	(void) debugfs_create_file("omap_gpio", S_IRUGO,
-					NULL, NULL, &debug_fops);
-	return 0;
-}
-late_initcall(omap_gpio_debuginit);
-#endif
diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c
index f044b59..eec2b49 100644
--- a/arch/arm/plat-omap/i2c.c
+++ b/arch/arm/plat-omap/i2c.c
@@ -38,6 +38,7 @@
 #define OMAP2_I2C_BASE1		0x48070000
 #define OMAP2_I2C_BASE2		0x48072000
 #define OMAP2_I2C_BASE3		0x48060000
+#define OMAP4_I2C_BASE4		0x48350000
 
 static const char name[] = "i2c_omap";
 
@@ -54,11 +55,14 @@
 
 static struct resource i2c_resources[][2] = {
 	{ I2C_RESOURCE_BUILDER(0, 0) },
-#if	defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
-	{ I2C_RESOURCE_BUILDER(OMAP2_I2C_BASE2, INT_24XX_I2C2_IRQ) },
+#if	defined(CONFIG_ARCH_OMAP2PLUS)
+	{ I2C_RESOURCE_BUILDER(OMAP2_I2C_BASE2, 0) },
 #endif
-#if	defined(CONFIG_ARCH_OMAP3)
-	{ I2C_RESOURCE_BUILDER(OMAP2_I2C_BASE3, INT_34XX_I2C3_IRQ) },
+#if	defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
+	{ I2C_RESOURCE_BUILDER(OMAP2_I2C_BASE3, 0) },
+#endif
+#if	defined(CONFIG_ARCH_OMAP4)
+	{ I2C_RESOURCE_BUILDER(OMAP4_I2C_BASE4, 0) },
 #endif
 };
 
@@ -76,12 +80,15 @@
 static struct omap_i2c_bus_platform_data i2c_pdata[ARRAY_SIZE(i2c_resources)];
 static struct platform_device omap_i2c_devices[] = {
 	I2C_DEV_BUILDER(1, i2c_resources[0], &i2c_pdata[0]),
-#if	defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+#if	defined(CONFIG_ARCH_OMAP2PLUS)
 	I2C_DEV_BUILDER(2, i2c_resources[1], &i2c_pdata[1]),
 #endif
-#if	defined(CONFIG_ARCH_OMAP3)
+#if	defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
 	I2C_DEV_BUILDER(3, i2c_resources[2], &i2c_pdata[2]),
 #endif
+#if	defined(CONFIG_ARCH_OMAP4)
+	I2C_DEV_BUILDER(4, i2c_resources[3], &i2c_pdata[3]),
+#endif
 };
 
 #define OMAP_I2C_CMDLINE_SETUP	(BIT(31))
@@ -96,37 +103,60 @@
 		ports = 2;
 	else if (cpu_is_omap34xx())
 		ports = 3;
+	else if (cpu_is_omap44xx())
+		ports = 4;
 
 	return ports;
 }
 
-static int __init omap_i2c_add_bus(int bus_id)
+/* Shared between omap2 and 3 */
+static resource_size_t omap2_i2c_irq[3] __initdata = {
+	INT_24XX_I2C1_IRQ,
+	INT_24XX_I2C2_IRQ,
+	INT_34XX_I2C3_IRQ,
+};
+
+static resource_size_t omap4_i2c_irq[4] __initdata = {
+	OMAP44XX_IRQ_I2C1,
+	OMAP44XX_IRQ_I2C2,
+	OMAP44XX_IRQ_I2C3,
+	OMAP44XX_IRQ_I2C4,
+};
+
+static inline int omap1_i2c_add_bus(struct platform_device *pdev, int bus_id)
 {
-	struct platform_device *pdev;
 	struct omap_i2c_bus_platform_data *pd;
 	struct resource *res;
-	resource_size_t base, irq;
 
-	pdev = &omap_i2c_devices[bus_id - 1];
 	pd = pdev->dev.platform_data;
+	res = pdev->resource;
+	res[0].start = OMAP1_I2C_BASE;
+	res[0].end = res[0].start + OMAP_I2C_SIZE;
+	res[1].start = INT_I2C;
+	omap1_i2c_mux_pins(bus_id);
+
+	return platform_device_register(pdev);
+}
+
+static inline int omap2_i2c_add_bus(struct platform_device *pdev, int bus_id)
+{
+	struct resource *res;
+	resource_size_t *irq;
+
+	res = pdev->resource;
+
+	if (!cpu_is_omap44xx())
+		irq = omap2_i2c_irq;
+	else
+		irq = omap4_i2c_irq;
+
 	if (bus_id == 1) {
-		res = pdev->resource;
-		if (cpu_class_is_omap1()) {
-			base = OMAP1_I2C_BASE;
-			irq = INT_I2C;
-		} else {
-			base = OMAP2_I2C_BASE1;
-			irq = INT_24XX_I2C1_IRQ;
-		}
-		res[0].start = base;
-		res[0].end = base + OMAP_I2C_SIZE;
-		res[1].start = irq;
+		res[0].start = OMAP2_I2C_BASE1;
+		res[0].end = res[0].start + OMAP_I2C_SIZE;
 	}
 
-	if (cpu_class_is_omap1())
-		omap1_i2c_mux_pins(bus_id);
-	if (cpu_class_is_omap2())
-		omap2_i2c_mux_pins(bus_id);
+	res[1].start = irq[bus_id - 1];
+	omap2_i2c_mux_pins(bus_id);
 
 	/*
 	 * When waiting for completion of a i2c transfer, we need to
@@ -134,12 +164,28 @@
 	 * ensure quick enough wakeup from idle, when transfer
 	 * completes.
 	 */
-	if (cpu_is_omap34xx())
+	if (cpu_is_omap34xx()) {
+		struct omap_i2c_bus_platform_data *pd;
+
+		pd = pdev->dev.platform_data;
 		pd->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat;
+	}
 
 	return platform_device_register(pdev);
 }
 
+static int __init omap_i2c_add_bus(int bus_id)
+{
+	struct platform_device *pdev;
+
+	pdev = &omap_i2c_devices[bus_id - 1];
+
+	if (cpu_class_is_omap1())
+		return omap1_i2c_add_bus(pdev, bus_id);
+	else
+		return omap2_i2c_add_bus(pdev, bus_id);
+}
+
 /**
  * omap_i2c_bus_setup - Process command line options for the I2C bus speed
  * @str: String of options
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 34f7fa9..dfc472c 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -196,15 +196,15 @@
 #define INVERT_ENABLE		(1 << 4)	/* 0 enables, 1 disables */
 
 /* Clksel_rate flags */
-#define DEFAULT_RATE		(1 << 0)
-#define RATE_IN_242X		(1 << 1)
-#define RATE_IN_243X		(1 << 2)
-#define RATE_IN_343X		(1 << 3)	/* rates common to all 343X */
-#define RATE_IN_3430ES2		(1 << 4)	/* 3430ES2 rates only */
-#define RATE_IN_36XX		(1 << 5)
-#define RATE_IN_4430		(1 << 6)
+#define RATE_IN_242X		(1 << 0)
+#define RATE_IN_243X		(1 << 1)
+#define RATE_IN_3XXX		(1 << 2)	/* rates common to all OMAP3 */
+#define RATE_IN_3430ES2		(1 << 3)	/* 3430ES2 rates only */
+#define RATE_IN_36XX		(1 << 4)
+#define RATE_IN_4430		(1 << 5)
 
 #define RATE_IN_24XX		(RATE_IN_242X | RATE_IN_243X)
 
+#define RATE_IN_3430ES2PLUS	(RATE_IN_3430ES2 | RATE_IN_36XX)
 
 #endif
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index 7556e27..d265018 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -31,9 +31,6 @@
 
 struct sys_timer;
 
-/* used by omap-smp.c and board-4430sdp.c */
-extern void __iomem *gic_cpu_base_addr;
-
 extern void omap_map_common_io(void);
 extern struct sys_timer omap_timer;
 
diff --git a/arch/arm/plat-omap/include/plat/control.h b/arch/arm/plat-omap/include/plat/control.h
index a56deee..131bf40 100644
--- a/arch/arm/plat-omap/include/plat/control.h
+++ b/arch/arm/plat-omap/include/plat/control.h
@@ -207,6 +207,9 @@
 /* 44xx control status register offset */
 #define OMAP44XX_CONTROL_STATUS		0x2c4
 
+/* 44xx-only CONTROL_GENERAL register offsets */
+#define OMAP44XX_CONTROL_MMC1			0x628
+#define OMAP44XX_CONTROL_PBIAS_LITE		0x600
 /*
  * REVISIT: This list of registers is not comprehensive - there are more
  * that should be added.
@@ -252,6 +255,23 @@
 #define OMAP2_PBIASLITEPWRDNZ0		(1 << 1)
 #define OMAP2_PBIASLITEVMODE0		(1 << 0)
 
+/* CONTROL_PBIAS_LITE bits for OMAP4 */
+#define OMAP4_MMC1_PWRDNZ			(1 << 26)
+#define OMAP4_MMC1_PBIASLITE_HIZ_MODE		(1 << 25)
+#define OMAP4_MMC1_PBIASLITE_SUPPLY_HI_OUT	(1 << 24)
+#define OMAP4_MMC1_PBIASLITE_VMODE_ERROR	(1 << 23)
+#define OMAP4_MMC1_PBIASLITE_PWRDNZ		(1 << 22)
+#define OMAP4_MMC1_PBIASLITE_VMODE		(1 << 21)
+#define OMAP4_USBC1_ICUSB_PWRDNZ		(1 << 20)
+
+#define OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP0	(1 << 31)
+#define OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP1	(1 << 30)
+#define OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP2	(1 << 29)
+#define OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP3	(1 << 28)
+#define OMAP4_CONTROL_SDMMC1_DR0_SPEEDCTRL	(1 << 27)
+#define OMAP4_CONTROL_SDMMC1_DR1_SPEEDCTRL	(1 << 26)
+#define OMAP4_CONTROL_SDMMC1_DR2_SPEEDCTRL	(1 << 25)
+
 /* CONTROL_PROG_IO1 bits */
 #define OMAP3630_PRG_SDMMC1_SPEEDCTRL	(1 << 20)
 
diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h
index de7c547..de1c604 100644
--- a/arch/arm/plat-omap/include/plat/gpio.h
+++ b/arch/arm/plat-omap/include/plat/gpio.h
@@ -72,8 +72,8 @@
 				 IH_GPIO_BASE + (nr))
 
 extern int omap_gpio_init(void);	/* Call from board init only */
-extern void omap2_gpio_prepare_for_retention(void);
-extern void omap2_gpio_resume_after_retention(void);
+extern void omap2_gpio_prepare_for_idle(int power_state);
+extern void omap2_gpio_resume_after_idle(void);
 extern void omap_set_gpio_debounce(int gpio, int enable);
 extern void omap_set_gpio_debounce_time(int gpio, int enable);
 extern void omap_gpio_save_context(void);
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 4017019..c01d9f0 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -428,4 +428,8 @@
 
 #include <mach/hardware.h>
 
+#ifdef CONFIG_FIQ
+#define FIQ_START		1024
+#endif
+
 #endif
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index a1bac07..c835f1e 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -102,6 +102,10 @@
 		/* Regulator off remapped to sleep */
 		unsigned vcc_aux_disable_is_sleep:1;
 
+		/* the single-bit feature flags above could be folded into this variable */
+#define HSMMC_HAS_PBIAS		(1 << 0)
+		unsigned features;
+
 		int switch_pin;			/* gpio (card detect) */
 		int gpio_wp;			/* gpio (write protect) */
 
diff --git a/arch/arm/plat-omap/include/plat/multi.h b/arch/arm/plat-omap/include/plat/multi.h
index f235d32..ffd909f 100644
--- a/arch/arm/plat-omap/include/plat/multi.h
+++ b/arch/arm/plat-omap/include/plat/multi.h
@@ -61,9 +61,9 @@
 #  define OMAP_NAME omap16xx
 # endif
 #endif
-#if (defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3))
+#ifdef CONFIG_ARCH_OMAP2PLUS
 # if (defined(OMAP_NAME) || defined(MULTI_OMAP1))
-#  error "OMAP1 and OMAP2 can't be selected at the same time"
+#  error "OMAP1 and OMAP2PLUS can't be selected at the same time"
 # endif
 #endif
 #ifdef CONFIG_ARCH_OMAP2420
@@ -82,12 +82,20 @@
 #  define OMAP_NAME omap2430
 # endif
 #endif
-#ifdef CONFIG_ARCH_OMAP3430
+#ifdef CONFIG_ARCH_OMAP3
 # ifdef OMAP_NAME
 #  undef  MULTI_OMAP2
 #  define MULTI_OMAP2
 # else
-#  define OMAP_NAME omap3430
+#  define OMAP_NAME omap3
+# endif
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+# ifdef OMAP_NAME
+#  undef  MULTI_OMAP2
+#  define MULTI_OMAP2
+# else
+#  define OMAP_NAME omap4
 # endif
 #endif
 
diff --git a/arch/arm/plat-omap/include/plat/omap34xx.h b/arch/arm/plat-omap/include/plat/omap34xx.h
index 2845fdc..98fc8b4 100644
--- a/arch/arm/plat-omap/include/plat/omap34xx.h
+++ b/arch/arm/plat-omap/include/plat/omap34xx.h
@@ -82,5 +82,10 @@
 
 #define OMAP34XX_MAILBOX_BASE		(L4_34XX_BASE + 0x94000)
 
+/* Security */
+#define OMAP34XX_SEC_BASE	(L4_34XX_BASE + 0xA0000)
+#define OMAP34XX_SEC_SHA1MD5_BASE	(OMAP34XX_SEC_BASE + 0x23000)
+#define OMAP34XX_SEC_AES_BASE	(OMAP34XX_SEC_BASE + 0x25000)
+
 #endif /* __ASM_ARCH_OMAP3_H */
 
diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h
index b3ef1a7..8b3f12f 100644
--- a/arch/arm/plat-omap/include/plat/omap44xx.h
+++ b/arch/arm/plat-omap/include/plat/omap44xx.h
@@ -30,6 +30,7 @@
 #define OMAP4430_CM_BASE		OMAP4430_CM1_BASE
 #define OMAP4430_CM2_BASE		0x4a008000
 #define OMAP4430_PRM_BASE		0x4a306000
+#define OMAP4430_PRCM_MPU_BASE		0x48243000
 #define OMAP44XX_GPMC_BASE		0x50000000
 #define OMAP443X_SCM_BASE		0x4a002000
 #define OMAP443X_CTRL_BASE		0x4a100000
@@ -48,5 +49,8 @@
 #define OMAP44XX_MAILBOX_BASE		(L4_44XX_BASE + 0xF4000)
 #define OMAP44XX_HSUSB_OTG_BASE		(L4_44XX_BASE + 0xAB000)
 
+#define OMAP4_MMU1_BASE			0x55082000
+#define OMAP4_MMU2_BASE			0x4A066000
+
 #endif /* __ASM_ARCH_OMAP44XX_H */
 
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 36d6ea5..0eccc09 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -176,9 +176,8 @@
 #define OCP_USER_SDMA			(1 << 1)
 
 /* omap_hwmod_ocp_if.flags bits */
-#define OCPIF_HAS_IDLEST		(1 << 0)
-#define OCPIF_SWSUP_IDLE		(1 << 1)
-#define OCPIF_CAN_BURST			(1 << 2)
+#define OCPIF_SWSUP_IDLE		(1 << 0)
+#define OCPIF_CAN_BURST			(1 << 1)
 
 /**
  * struct omap_hwmod_ocp_if - OCP interface data
@@ -327,14 +326,12 @@
 
 /**
  * struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
- * @module_offs: PRCM submodule offset from the start of the PRM/CM1/CM2
- * @device_offs: device register offset from @module_offs
+ * @clkctrl_reg: PRCM address of the clock control register
  * @submodule_wkdep_bit: bit shift of the WKDEP range
  */
 struct omap_hwmod_omap4_prcm {
-	u32 module_offs;
-	u16 device_offs;
-	u8 submodule_wkdep_bit;
+	void __iomem	*clkctrl_reg;
+	u8		submodule_wkdep_bit;
 };
 
 
@@ -353,6 +350,8 @@
  *     when module is enabled, rather than the default, which is to
  *     enable autoidle
  * HWMOD_SET_DEFAULT_CLOCKACT: program CLOCKACTIVITY bits at startup
+ * HWMOD_NO_IDLEST: this module does not have idle status - this is the case
+ *     only for a few initiator modules on OMAP2 & 3.
  */
 #define HWMOD_SWSUP_SIDLE			(1 << 0)
 #define HWMOD_SWSUP_MSTANDBY			(1 << 1)
@@ -360,6 +359,7 @@
 #define HWMOD_INIT_NO_IDLE			(1 << 3)
 #define HWMOD_NO_OCP_AUTOIDLE			(1 << 4)
 #define HWMOD_SET_DEFAULT_CLOCKACT		(1 << 5)
+#define HWMOD_NO_IDLEST				(1 << 6)
 
 /*
  * omap_hwmod._int_flags definitions
diff --git a/arch/arm/plat-omap/include/plat/powerdomain.h b/arch/arm/plat-omap/include/plat/powerdomain.h
index d82b2c0..fb6ec74 100644
--- a/arch/arm/plat-omap/include/plat/powerdomain.h
+++ b/arch/arm/plat-omap/include/plat/powerdomain.h
@@ -31,6 +31,7 @@
 #define PWRDM_MAX_PWRSTS	4
 
 /* Powerdomain allowable state bitfields */
+#define PWRSTS_ON		(1 << PWRDM_POWER_ON)
 #define PWRSTS_OFF_ON		((1 << PWRDM_POWER_OFF) | \
 				 (1 << PWRDM_POWER_ON))
 
@@ -49,6 +50,12 @@
 					  * in MEM bank 1 position. This is
 					  * true for OMAP3430
 					  */
+#define PWRDM_HAS_LOWPOWERSTATECHANGE	(1 << 2) /*
+						  * supports transitioning from a
+						  * sleep state to a lower sleep
+						  * state without waking up the
+						  * powerdomain
+						  */
 
 /*
  * Number of memory banks that are power-controllable.	On OMAP4430, the
diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h
index 83dce4c..19145f5 100644
--- a/arch/arm/plat-omap/include/plat/serial.h
+++ b/arch/arm/plat-omap/include/plat/serial.h
@@ -15,6 +15,20 @@
 
 #include <linux/init.h>
 
+/*
+ * Memory entry used for the DEBUG_LL UART configuration. See also
+ * uncompress.h and debug-macro.S.
+ *
+ * Note that using a memory location for storing the UART configuration
+ * has at least two limitations:
+ *
+ * 1. Kernel uncompress code cannot overlap OMAP_UART_INFO as the
+ *    uncompress code could then partially overwrite itself
+ * 2. We assume printascii is called at least once before paging_init,
+ *    and addruart has a chance to read OMAP_UART_INFO
+ */
+#define OMAP_UART_INFO		(PHYS_OFFSET + 0x3ffc)
+
 /* OMAP1 serial ports */
 #define OMAP1_UART1_BASE	0xfffb0000
 #define OMAP1_UART2_BASE	0xfffb0800
@@ -39,7 +53,7 @@
 
 /* External port on Zoom2/3 */
 #define ZOOM_UART_BASE		0x10000000
-#define ZOOM_UART_VIRT		0xfb000000
+#define ZOOM_UART_VIRT		0xfa400000
 
 #define OMAP_PORT_SHIFT		2
 #define OMAP7XX_PORT_SHIFT	0
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
index 81d9ec5..bbedd71 100644
--- a/arch/arm/plat-omap/include/plat/uncompress.h
+++ b/arch/arm/plat-omap/include/plat/uncompress.h
@@ -20,27 +20,21 @@
 #include <linux/types.h>
 #include <linux/serial_reg.h>
 
+#include <asm/memory.h>
 #include <asm/mach-types.h>
 
 #include <plat/serial.h>
 
-static volatile u8 *uart1_base;
-static int uart1_shift;
-
 static volatile u8 *uart_base;
 static int uart_shift;
 
 /*
- * Store the DEBUG_LL uart number into UART1 scratchpad register.
+ * Store the DEBUG_LL uart number into memory.
  * See also debug-macro.S, and serial.c for related code.
- *
- * Please note that we currently assume that:
- * - UART1 clocks are enabled for register access
- * - UART1 scratchpad register can be used
  */
-static void set_uart1_scratchpad(unsigned char port)
+static void set_omap_uart_info(unsigned char port)
 {
-	uart1_base[UART_SCR << uart1_shift] = port;
+	*(volatile u32 *)OMAP_UART_INFO = port;
 }
 
 static void putc(int c)
@@ -60,42 +54,38 @@
 /*
  * Macros to configure UART1 and debug UART
  */
-#define _DEBUG_LL_ENTRY(mach, uart1_phys, uart1_shft,			\
-			dbg_uart, dbg_shft, dbg_id)			\
+#define _DEBUG_LL_ENTRY(mach, dbg_uart, dbg_shft, dbg_id)		\
 	if (machine_is_##mach()) {					\
-		uart1_base = (volatile u8 *)(uart1_phys);		\
-		uart1_shift = (uart1_shft);				\
 		uart_base = (volatile u8 *)(dbg_uart);			\
 		uart_shift = (dbg_shft);				\
 		port = (dbg_id);					\
-		set_uart1_scratchpad(port);				\
+		set_omap_uart_info(port);				\
 		break;							\
 	}
 
 #define DEBUG_LL_OMAP7XX(p, mach)					\
-	_DEBUG_LL_ENTRY(mach, OMAP1_UART1_BASE, OMAP7XX_PORT_SHIFT,	\
-		OMAP1_UART##p##_BASE, OMAP7XX_PORT_SHIFT, OMAP1UART##p)
+	_DEBUG_LL_ENTRY(mach, OMAP1_UART##p##_BASE, OMAP7XX_PORT_SHIFT,	\
+		OMAP1UART##p)
 
 #define DEBUG_LL_OMAP1(p, mach)						\
-	_DEBUG_LL_ENTRY(mach, OMAP1_UART1_BASE, OMAP_PORT_SHIFT,	\
-		OMAP1_UART##p##_BASE, OMAP_PORT_SHIFT, OMAP1UART##p)
+	_DEBUG_LL_ENTRY(mach, OMAP1_UART##p##_BASE, OMAP_PORT_SHIFT,	\
+		OMAP1UART##p)
 
 #define DEBUG_LL_OMAP2(p, mach)						\
-	_DEBUG_LL_ENTRY(mach, OMAP2_UART1_BASE, OMAP_PORT_SHIFT,	\
-		OMAP2_UART##p##_BASE, OMAP_PORT_SHIFT, OMAP2UART##p)
+	_DEBUG_LL_ENTRY(mach, OMAP2_UART##p##_BASE, OMAP_PORT_SHIFT,	\
+		OMAP2UART##p)
 
 #define DEBUG_LL_OMAP3(p, mach)						\
-	_DEBUG_LL_ENTRY(mach, OMAP3_UART1_BASE, OMAP_PORT_SHIFT,	\
-		OMAP3_UART##p##_BASE, OMAP_PORT_SHIFT, OMAP3UART##p)
+	_DEBUG_LL_ENTRY(mach, OMAP3_UART##p##_BASE, OMAP_PORT_SHIFT,	\
+		OMAP3UART##p)
 
 #define DEBUG_LL_OMAP4(p, mach)						\
-	_DEBUG_LL_ENTRY(mach, OMAP4_UART1_BASE, OMAP_PORT_SHIFT,	\
-		OMAP4_UART##p##_BASE, OMAP_PORT_SHIFT, OMAP4UART##p)
+	_DEBUG_LL_ENTRY(mach, OMAP4_UART##p##_BASE, OMAP_PORT_SHIFT,	\
+		OMAP4UART##p)
 
 /* Zoom2/3 shift is different for UART1 and external port */
 #define DEBUG_LL_ZOOM(mach)						\
-	_DEBUG_LL_ENTRY(mach, OMAP2_UART1_BASE, OMAP_PORT_SHIFT,	\
-		ZOOM_UART_BASE, ZOOM_PORT_SHIFT, ZOOM_UART)
+	_DEBUG_LL_ENTRY(mach, ZOOM_UART_BASE, ZOOM_PORT_SHIFT, ZOOM_UART)
 
 static inline void __arch_decomp_setup(unsigned long arch_id)
 {
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index 0e13766..bc094db 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -25,6 +25,11 @@
 
 #include "iopgtable.h"
 
+#define for_each_iotlb_cr(obj, n, __i, cr)				\
+	for (__i = 0;							\
+	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
+	     __i++)
+
 /* accommodate the difference between omap1 and omap2/3 */
 static const struct iommu_functions *arch_iommu;
 
@@ -172,15 +177,12 @@
 	l->base = MMU_LOCK_BASE(val);
 	l->vict = MMU_LOCK_VICT(val);
 
-	BUG_ON(l->base != 0); /* Currently no preservation is used */
 }
 
 static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
 {
 	u32 val;
 
-	BUG_ON(l->base != 0); /* Currently no preservation is used */
-
 	val = (l->base << MMU_LOCK_BASE_SHIFT);
 	val |= (l->vict << MMU_LOCK_VICT_SHIFT);
 
@@ -214,6 +216,20 @@
 	return arch_iommu->dump_cr(obj, cr, buf);
 }
 
+/* only used by the for_each_iotlb_cr() iteration macro */
+static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
+{
+	struct cr_regs cr;
+	struct iotlb_lock l;
+
+	iotlb_lock_get(obj, &l);
+	l.vict = n;
+	iotlb_lock_set(obj, &l);
+	iotlb_read_cr(obj, &cr);
+
+	return cr;
+}
+
 /**
  * load_iotlb_entry - Set an iommu tlb entry
  * @obj:	target iommu
@@ -221,7 +237,6 @@
  **/
 int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
 {
-	int i;
 	int err = 0;
 	struct iotlb_lock l;
 	struct cr_regs *cr;
@@ -231,22 +246,31 @@
 
 	clk_enable(obj->clk);
 
-	for (i = 0; i < obj->nr_tlb_entries; i++) {
-		struct cr_regs tmp;
-
-		iotlb_lock_get(obj, &l);
-		l.vict = i;
-		iotlb_lock_set(obj, &l);
-		iotlb_read_cr(obj, &tmp);
-		if (!iotlb_cr_valid(&tmp))
-			break;
-	}
-
-	if (i == obj->nr_tlb_entries) {
-		dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
+	iotlb_lock_get(obj, &l);
+	if (l.base == obj->nr_tlb_entries) {
+		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
 		err = -EBUSY;
 		goto out;
 	}
+	if (!e->prsvd) {
+		int i;
+		struct cr_regs tmp;
+
+		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
+			if (!iotlb_cr_valid(&tmp))
+				break;
+
+		if (i == obj->nr_tlb_entries) {
+			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
+			err = -EBUSY;
+			goto out;
+		}
+
+		iotlb_lock_get(obj, &l);
+	} else {
+		l.vict = l.base;
+		iotlb_lock_set(obj, &l);
+	}
 
 	cr = iotlb_alloc_cr(obj, e);
 	if (IS_ERR(cr)) {
@@ -257,9 +281,11 @@
 	iotlb_load_cr(obj, cr);
 	kfree(cr);
 
+	if (e->prsvd)
+		l.base++;
 	/* increment victim for next tlb load */
 	if (++l.vict == obj->nr_tlb_entries)
-		l.vict = 0;
+		l.vict = l.base;
 	iotlb_lock_set(obj, &l);
 out:
 	clk_disable(obj->clk);
@@ -276,20 +302,15 @@
  **/
 void flush_iotlb_page(struct iommu *obj, u32 da)
 {
-	struct iotlb_lock l;
 	int i;
+	struct cr_regs cr;
 
 	clk_enable(obj->clk);
 
-	for (i = 0; i < obj->nr_tlb_entries; i++) {
-		struct cr_regs cr;
+	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
 		u32 start;
 		size_t bytes;
 
-		iotlb_lock_get(obj, &l);
-		l.vict = i;
-		iotlb_lock_set(obj, &l);
-		iotlb_read_cr(obj, &cr);
 		if (!iotlb_cr_valid(&cr))
 			continue;
 
@@ -299,7 +320,6 @@
 		if ((start <= da) && (da < start + bytes)) {
 			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
 				__func__, start, da, bytes);
-			iotlb_load_cr(obj, &cr);
 			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
 		}
 	}
@@ -370,26 +390,19 @@
 static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
 {
 	int i;
-	struct iotlb_lock saved, l;
+	struct iotlb_lock saved;
+	struct cr_regs tmp;
 	struct cr_regs *p = crs;
 
 	clk_enable(obj->clk);
-
 	iotlb_lock_get(obj, &saved);
-	memcpy(&l, &saved, sizeof(saved));
 
-	for (i = 0; i < num; i++) {
-		struct cr_regs tmp;
-
-		iotlb_lock_get(obj, &l);
-		l.vict = i;
-		iotlb_lock_set(obj, &l);
-		iotlb_read_cr(obj, &tmp);
+	for_each_iotlb_cr(obj, num, i, tmp) {
 		if (!iotlb_cr_valid(&tmp))
 			continue;
-
 		*p++ = tmp;
 	}
+
 	iotlb_lock_set(obj, &saved);
 	clk_disable(obj->clk);
 
@@ -503,6 +516,12 @@
 {
 	u32 *iopgd = iopgd_offset(obj, da);
 
+	if ((da | pa) & ~IOSECTION_MASK) {
+		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
+			__func__, da, pa, IOSECTION_SIZE);
+		return -EINVAL;
+	}
+
 	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
 	flush_iopgd_range(iopgd, iopgd);
 	return 0;
@@ -513,6 +532,12 @@
 	u32 *iopgd = iopgd_offset(obj, da);
 	int i;
 
+	if ((da | pa) & ~IOSUPER_MASK) {
+		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
+			__func__, da, pa, IOSUPER_SIZE);
+		return -EINVAL;
+	}
+
 	for (i = 0; i < 16; i++)
 		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
 	flush_iopgd_range(iopgd, iopgd + 15);
@@ -542,6 +567,12 @@
 	u32 *iopte = iopte_alloc(obj, iopgd, da);
 	int i;
 
+	if ((da | pa) & ~IOLARGE_MASK) {
+		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
+			__func__, da, pa, IOLARGE_SIZE);
+		return -EINVAL;
+	}
+
 	if (IS_ERR(iopte))
 		return PTR_ERR(iopte);
 
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 65c6d1f..e43983b 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -287,16 +287,19 @@
 	prev_end = 0;
 	list_for_each_entry(tmp, &obj->mmap, list) {
 
-		if ((prev_end <= start) && (start + bytes < tmp->da_start))
+		if (prev_end >= start)
+			break;
+
+		if (start + bytes < tmp->da_start)
 			goto found;
 
 		if (flags & IOVMF_DA_ANON)
-			start = roundup(tmp->da_end, alignement);
+			start = roundup(tmp->da_end + 1, alignement);
 
 		prev_end = tmp->da_end;
 	}
 
-	if ((start >= prev_end) && (ULONG_MAX - start >= bytes))
+	if ((start > prev_end) && (ULONG_MAX - start >= bytes))
 		goto found;
 
 	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index 0f51974..f899603 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -2,10 +2,10 @@
  * omap_device implementation
  *
  * Copyright (C) 2009 Nokia Corporation
- * Paul Walmsley
+ * Paul Walmsley, Kevin Hilman
  *
  * Developed in collaboration with (alphabetical order): Benoit
- * Cousson, Kevin Hilman, Tony Lindgren, Rajendra Nayak, Vikram
+ * Cousson, Thara Gopinath, Tony Lindgren, Rajendra Nayak, Vikram
  * Pandita, Sakari Poussa, Anand Sawant, Santosh Shilimkar, Richard
  * Woodruff
  *
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 51f4dfb..226b2e8 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -437,6 +437,20 @@
 }
 #endif
 
+#ifdef CONFIG_ARCH_OMAP4
+int __init omap44xx_sram_init(void)
+{
+	printk(KERN_ERR "FIXME: %s not implemented\n", __func__);
+
+	return -ENODEV;
+}
+#else
+static inline int omap44xx_sram_init(void)
+{
+	return 0;
+}
+#endif
+
 int __init omap_sram_init(void)
 {
 	omap_detect_sram();
@@ -451,7 +465,7 @@
 	else if (cpu_is_omap34xx())
 		omap34xx_sram_init();
 	else if (cpu_is_omap44xx())
-		omap34xx_sram_init(); /* FIXME: */
+		omap44xx_sram_init();
 
 	return 0;
 }
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index 2c501ce..7367aea 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -439,6 +439,11 @@
 	return -EFAULT;
 }
 
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+	regs->retx = ip;
+}
+
 int kgdb_arch_init(void)
 {
 	kgdb_single_step = 0;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 7e6fd1c..cdaae94 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1075,6 +1075,8 @@
 	bool "Loongson 2F"
 	depends on SYS_HAS_CPU_LOONGSON2F
 	select CPU_LOONGSON2
+	select GENERIC_GPIO
+	select ARCH_REQUIRE_GPIOLIB
 	help
 	  The Loongson 2F processor implements the MIPS III instruction set
 	  with many extensions.
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index 99ae84c..ca0506a 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -36,6 +36,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/sysdev.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
 
@@ -174,10 +175,6 @@
 
 #define DBDEV_TAB_SIZE	ARRAY_SIZE(dbdev_tab)
 
-#ifdef CONFIG_PM
-static u32 au1xxx_dbdma_pm_regs[NUM_DBDMA_CHANS + 1][6];
-#endif
-
 
 static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
 
@@ -960,29 +957,37 @@
 	return nbytes;
 }
 
-#ifdef CONFIG_PM
-void au1xxx_dbdma_suspend(void)
+
+struct alchemy_dbdma_sysdev {
+	struct sys_device sysdev;
+	u32 pm_regs[NUM_DBDMA_CHANS + 1][6];
+};
+
+static int alchemy_dbdma_suspend(struct sys_device *dev,
+				 pm_message_t state)
 {
+	struct alchemy_dbdma_sysdev *sdev =
+		container_of(dev, struct alchemy_dbdma_sysdev, sysdev);
 	int i;
 	u32 addr;
 
 	addr = DDMA_GLOBAL_BASE;
-	au1xxx_dbdma_pm_regs[0][0] = au_readl(addr + 0x00);
-	au1xxx_dbdma_pm_regs[0][1] = au_readl(addr + 0x04);
-	au1xxx_dbdma_pm_regs[0][2] = au_readl(addr + 0x08);
-	au1xxx_dbdma_pm_regs[0][3] = au_readl(addr + 0x0c);
+	sdev->pm_regs[0][0] = au_readl(addr + 0x00);
+	sdev->pm_regs[0][1] = au_readl(addr + 0x04);
+	sdev->pm_regs[0][2] = au_readl(addr + 0x08);
+	sdev->pm_regs[0][3] = au_readl(addr + 0x0c);
 
 	/* save channel configurations */
 	for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) {
-		au1xxx_dbdma_pm_regs[i][0] = au_readl(addr + 0x00);
-		au1xxx_dbdma_pm_regs[i][1] = au_readl(addr + 0x04);
-		au1xxx_dbdma_pm_regs[i][2] = au_readl(addr + 0x08);
-		au1xxx_dbdma_pm_regs[i][3] = au_readl(addr + 0x0c);
-		au1xxx_dbdma_pm_regs[i][4] = au_readl(addr + 0x10);
-		au1xxx_dbdma_pm_regs[i][5] = au_readl(addr + 0x14);
+		sdev->pm_regs[i][0] = au_readl(addr + 0x00);
+		sdev->pm_regs[i][1] = au_readl(addr + 0x04);
+		sdev->pm_regs[i][2] = au_readl(addr + 0x08);
+		sdev->pm_regs[i][3] = au_readl(addr + 0x0c);
+		sdev->pm_regs[i][4] = au_readl(addr + 0x10);
+		sdev->pm_regs[i][5] = au_readl(addr + 0x14);
 
 		/* halt channel */
-		au_writel(au1xxx_dbdma_pm_regs[i][0] & ~1, addr + 0x00);
+		au_writel(sdev->pm_regs[i][0] & ~1, addr + 0x00);
 		au_sync();
 		while (!(au_readl(addr + 0x14) & 1))
 			au_sync();
@@ -992,32 +997,65 @@
 	/* disable channel interrupts */
 	au_writel(0, DDMA_GLOBAL_BASE + 0x0c);
 	au_sync();
+
+	return 0;
 }
 
-void au1xxx_dbdma_resume(void)
+static int alchemy_dbdma_resume(struct sys_device *dev)
 {
+	struct alchemy_dbdma_sysdev *sdev =
+		container_of(dev, struct alchemy_dbdma_sysdev, sysdev);
 	int i;
 	u32 addr;
 
 	addr = DDMA_GLOBAL_BASE;
-	au_writel(au1xxx_dbdma_pm_regs[0][0], addr + 0x00);
-	au_writel(au1xxx_dbdma_pm_regs[0][1], addr + 0x04);
-	au_writel(au1xxx_dbdma_pm_regs[0][2], addr + 0x08);
-	au_writel(au1xxx_dbdma_pm_regs[0][3], addr + 0x0c);
+	au_writel(sdev->pm_regs[0][0], addr + 0x00);
+	au_writel(sdev->pm_regs[0][1], addr + 0x04);
+	au_writel(sdev->pm_regs[0][2], addr + 0x08);
+	au_writel(sdev->pm_regs[0][3], addr + 0x0c);
 
 	/* restore channel configurations */
 	for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) {
-		au_writel(au1xxx_dbdma_pm_regs[i][0], addr + 0x00);
-		au_writel(au1xxx_dbdma_pm_regs[i][1], addr + 0x04);
-		au_writel(au1xxx_dbdma_pm_regs[i][2], addr + 0x08);
-		au_writel(au1xxx_dbdma_pm_regs[i][3], addr + 0x0c);
-		au_writel(au1xxx_dbdma_pm_regs[i][4], addr + 0x10);
-		au_writel(au1xxx_dbdma_pm_regs[i][5], addr + 0x14);
+		au_writel(sdev->pm_regs[i][0], addr + 0x00);
+		au_writel(sdev->pm_regs[i][1], addr + 0x04);
+		au_writel(sdev->pm_regs[i][2], addr + 0x08);
+		au_writel(sdev->pm_regs[i][3], addr + 0x0c);
+		au_writel(sdev->pm_regs[i][4], addr + 0x10);
+		au_writel(sdev->pm_regs[i][5], addr + 0x14);
 		au_sync();
 		addr += 0x100;	/* next channel base */
 	}
+
+	return 0;
 }
-#endif	/* CONFIG_PM */
+
+static struct sysdev_class alchemy_dbdma_sysdev_class = {
+	.name		= "dbdma",
+	.suspend	= alchemy_dbdma_suspend,
+	.resume		= alchemy_dbdma_resume,
+};
+
+static int __init alchemy_dbdma_sysdev_init(void)
+{
+	struct alchemy_dbdma_sysdev *sdev;
+	int ret;
+
+	ret = sysdev_class_register(&alchemy_dbdma_sysdev_class);
+	if (ret)
+		return ret;
+
+	sdev = kzalloc(sizeof(struct alchemy_dbdma_sysdev), GFP_KERNEL);
+	if (!sdev)
+		return -ENOMEM;
+
+	sdev->sysdev.id = -1;
+	sdev->sysdev.cls = &alchemy_dbdma_sysdev_class;
+	ret = sysdev_register(&sdev->sysdev);
+	if (ret)
+		kfree(sdev);
+
+	return ret;
+}
 
 static int __init au1xxx_dbdma_init(void)
 {
@@ -1046,6 +1084,11 @@
 	else {
 		dbdma_initialized = 1;
 		printk(KERN_INFO "Alchemy DBDMA initialized\n");
+		ret = alchemy_dbdma_sysdev_init();
+		if (ret) {
+			printk(KERN_ERR "DBDMA PM init failed\n");
+			ret = 0;
+		}
 	}
 
 	return ret;
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
index b2821ac..9f78ada 100644
--- a/arch/mips/alchemy/common/irq.c
+++ b/arch/mips/alchemy/common/irq.c
@@ -29,6 +29,8 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/sysdev.h>
 
 #include <asm/irq_cpu.h>
 #include <asm/mipsregs.h>
@@ -216,90 +218,6 @@
 };
 
 
-#ifdef CONFIG_PM
-
-/*
- * Save/restore the interrupt controller state.
- * Called from the save/restore core registers as part of the
- * au_sleep function in power.c.....maybe I should just pm_register()
- * them instead?
- */
-static unsigned int	sleep_intctl_config0[2];
-static unsigned int	sleep_intctl_config1[2];
-static unsigned int	sleep_intctl_config2[2];
-static unsigned int	sleep_intctl_src[2];
-static unsigned int	sleep_intctl_assign[2];
-static unsigned int	sleep_intctl_wake[2];
-static unsigned int	sleep_intctl_mask[2];
-
-void save_au1xxx_intctl(void)
-{
-	sleep_intctl_config0[0] = au_readl(IC0_CFG0RD);
-	sleep_intctl_config1[0] = au_readl(IC0_CFG1RD);
-	sleep_intctl_config2[0] = au_readl(IC0_CFG2RD);
-	sleep_intctl_src[0] = au_readl(IC0_SRCRD);
-	sleep_intctl_assign[0] = au_readl(IC0_ASSIGNRD);
-	sleep_intctl_wake[0] = au_readl(IC0_WAKERD);
-	sleep_intctl_mask[0] = au_readl(IC0_MASKRD);
-
-	sleep_intctl_config0[1] = au_readl(IC1_CFG0RD);
-	sleep_intctl_config1[1] = au_readl(IC1_CFG1RD);
-	sleep_intctl_config2[1] = au_readl(IC1_CFG2RD);
-	sleep_intctl_src[1] = au_readl(IC1_SRCRD);
-	sleep_intctl_assign[1] = au_readl(IC1_ASSIGNRD);
-	sleep_intctl_wake[1] = au_readl(IC1_WAKERD);
-	sleep_intctl_mask[1] = au_readl(IC1_MASKRD);
-}
-
-/*
- * For most restore operations, we clear the entire register and
- * then set the bits we found during the save.
- */
-void restore_au1xxx_intctl(void)
-{
-	au_writel(0xffffffff, IC0_MASKCLR); au_sync();
-
-	au_writel(0xffffffff, IC0_CFG0CLR); au_sync();
-	au_writel(sleep_intctl_config0[0], IC0_CFG0SET); au_sync();
-	au_writel(0xffffffff, IC0_CFG1CLR); au_sync();
-	au_writel(sleep_intctl_config1[0], IC0_CFG1SET); au_sync();
-	au_writel(0xffffffff, IC0_CFG2CLR); au_sync();
-	au_writel(sleep_intctl_config2[0], IC0_CFG2SET); au_sync();
-	au_writel(0xffffffff, IC0_SRCCLR); au_sync();
-	au_writel(sleep_intctl_src[0], IC0_SRCSET); au_sync();
-	au_writel(0xffffffff, IC0_ASSIGNCLR); au_sync();
-	au_writel(sleep_intctl_assign[0], IC0_ASSIGNSET); au_sync();
-	au_writel(0xffffffff, IC0_WAKECLR); au_sync();
-	au_writel(sleep_intctl_wake[0], IC0_WAKESET); au_sync();
-	au_writel(0xffffffff, IC0_RISINGCLR); au_sync();
-	au_writel(0xffffffff, IC0_FALLINGCLR); au_sync();
-	au_writel(0x00000000, IC0_TESTBIT); au_sync();
-
-	au_writel(0xffffffff, IC1_MASKCLR); au_sync();
-
-	au_writel(0xffffffff, IC1_CFG0CLR); au_sync();
-	au_writel(sleep_intctl_config0[1], IC1_CFG0SET); au_sync();
-	au_writel(0xffffffff, IC1_CFG1CLR); au_sync();
-	au_writel(sleep_intctl_config1[1], IC1_CFG1SET); au_sync();
-	au_writel(0xffffffff, IC1_CFG2CLR); au_sync();
-	au_writel(sleep_intctl_config2[1], IC1_CFG2SET); au_sync();
-	au_writel(0xffffffff, IC1_SRCCLR); au_sync();
-	au_writel(sleep_intctl_src[1], IC1_SRCSET); au_sync();
-	au_writel(0xffffffff, IC1_ASSIGNCLR); au_sync();
-	au_writel(sleep_intctl_assign[1], IC1_ASSIGNSET); au_sync();
-	au_writel(0xffffffff, IC1_WAKECLR); au_sync();
-	au_writel(sleep_intctl_wake[1], IC1_WAKESET); au_sync();
-	au_writel(0xffffffff, IC1_RISINGCLR); au_sync();
-	au_writel(0xffffffff, IC1_FALLINGCLR); au_sync();
-	au_writel(0x00000000, IC1_TESTBIT); au_sync();
-
-	au_writel(sleep_intctl_mask[1], IC1_MASKSET); au_sync();
-
-	au_writel(sleep_intctl_mask[0], IC0_MASKSET); au_sync();
-}
-#endif /* CONFIG_PM */
-
-
 static void au1x_ic0_unmask(unsigned int irq_nr)
 {
 	unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
@@ -635,3 +553,91 @@
 		break;
 	}
 }
+
+struct alchemy_ic_sysdev {
+	struct sys_device sysdev;
+	void __iomem *base;
+	unsigned long pmdata[7];
+};
+
+static int alchemy_ic_suspend(struct sys_device *dev, pm_message_t state)
+{
+	struct alchemy_ic_sysdev *icdev =
+			container_of(dev, struct alchemy_ic_sysdev, sysdev);
+
+	icdev->pmdata[0] = __raw_readl(icdev->base + IC_CFG0RD);
+	icdev->pmdata[1] = __raw_readl(icdev->base + IC_CFG1RD);
+	icdev->pmdata[2] = __raw_readl(icdev->base + IC_CFG2RD);
+	icdev->pmdata[3] = __raw_readl(icdev->base + IC_SRCRD);
+	icdev->pmdata[4] = __raw_readl(icdev->base + IC_ASSIGNRD);
+	icdev->pmdata[5] = __raw_readl(icdev->base + IC_WAKERD);
+	icdev->pmdata[6] = __raw_readl(icdev->base + IC_MASKRD);
+
+	return 0;
+}
+
+static int alchemy_ic_resume(struct sys_device *dev)
+{
+	struct alchemy_ic_sysdev *icdev =
+			container_of(dev, struct alchemy_ic_sysdev, sysdev);
+
+	__raw_writel(0xffffffff, icdev->base + IC_MASKCLR);
+	__raw_writel(0xffffffff, icdev->base + IC_CFG0CLR);
+	__raw_writel(0xffffffff, icdev->base + IC_CFG1CLR);
+	__raw_writel(0xffffffff, icdev->base + IC_CFG2CLR);
+	__raw_writel(0xffffffff, icdev->base + IC_SRCCLR);
+	__raw_writel(0xffffffff, icdev->base + IC_ASSIGNCLR);
+	__raw_writel(0xffffffff, icdev->base + IC_WAKECLR);
+	__raw_writel(0xffffffff, icdev->base + IC_RISINGCLR);
+	__raw_writel(0xffffffff, icdev->base + IC_FALLINGCLR);
+	__raw_writel(0x00000000, icdev->base + IC_TESTBIT);
+	wmb();
+	__raw_writel(icdev->pmdata[0], icdev->base + IC_CFG0SET);
+	__raw_writel(icdev->pmdata[1], icdev->base + IC_CFG1SET);
+	__raw_writel(icdev->pmdata[2], icdev->base + IC_CFG2SET);
+	__raw_writel(icdev->pmdata[3], icdev->base + IC_SRCSET);
+	__raw_writel(icdev->pmdata[4], icdev->base + IC_ASSIGNSET);
+	__raw_writel(icdev->pmdata[5], icdev->base + IC_WAKESET);
+	wmb();
+
+	__raw_writel(icdev->pmdata[6], icdev->base + IC_MASKSET);
+	wmb();
+
+	return 0;
+}
+
+static struct sysdev_class alchemy_ic_sysdev_class = {
+	.name		= "ic",
+	.suspend	= alchemy_ic_suspend,
+	.resume		= alchemy_ic_resume,
+};
+
+static int __init alchemy_ic_sysdev_init(void)
+{
+	struct alchemy_ic_sysdev *icdev;
+	unsigned long icbase[2] = { IC0_PHYS_ADDR, IC1_PHYS_ADDR };
+	int err, i;
+
+	err = sysdev_class_register(&alchemy_ic_sysdev_class);
+	if (err)
+		return err;
+
+	for (i = 0; i < 2; i++) {
+		icdev = kzalloc(sizeof(struct alchemy_ic_sysdev), GFP_KERNEL);
+		if (!icdev)
+			return -ENOMEM;
+
+		icdev->base = ioremap(icbase[i], 0x1000);
+
+		icdev->sysdev.id = i;
+		icdev->sysdev.cls = &alchemy_ic_sysdev_class;
+		err = sysdev_register(&icdev->sysdev);
+		if (err) {
+			kfree(icdev);
+			return err;
+		}
+	}
+
+	return 0;
+}
+device_initcall(alchemy_ic_sysdev_init);
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
index 6ab7b42..14eb8c49 100644
--- a/arch/mips/alchemy/common/power.c
+++ b/arch/mips/alchemy/common/power.c
@@ -36,9 +36,6 @@
 
 #include <asm/uaccess.h>
 #include <asm/mach-au1x00/au1000.h>
-#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
-#include <asm/mach-au1x00/au1xxx_dbdma.h>
-#endif
 
 #ifdef CONFIG_PM
 
@@ -106,9 +103,6 @@
 	sleep_usb[1] = au_readl(0xb4020024);	/* OTG_MUX */
 #endif
 
-	/* Save interrupt controller state. */
-	save_au1xxx_intctl();
-
 	/* Clocks and PLLs. */
 	sleep_sys_clocks[0] = au_readl(SYS_FREQCTRL0);
 	sleep_sys_clocks[1] = au_readl(SYS_FREQCTRL1);
@@ -132,10 +126,6 @@
 	sleep_static_memctlr[3][0] = au_readl(MEM_STCFG3);
 	sleep_static_memctlr[3][1] = au_readl(MEM_STTIME3);
 	sleep_static_memctlr[3][2] = au_readl(MEM_STADDR3);
-
-#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
-	au1xxx_dbdma_suspend();
-#endif
 }
 
 static void restore_core_regs(void)
@@ -199,12 +189,6 @@
 		au_writel(sleep_uart0_linectl, UART0_ADDR + UART_LCR); au_sync();
 		au_writel(sleep_uart0_clkdiv, UART0_ADDR + UART_CLK); au_sync();
 	}
-
-	restore_au1xxx_intctl();
-
-#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
-	au1xxx_dbdma_resume();
-#endif
 }
 
 void au_sleep(void)
diff --git a/arch/mips/alchemy/devboards/pb1000/board_setup.c b/arch/mips/alchemy/devboards/pb1000/board_setup.c
index b5311d8..4ef50d8 100644
--- a/arch/mips/alchemy/devboards/pb1000/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1000/board_setup.c
@@ -27,8 +27,10 @@
 #include <linux/gpio.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/pm.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-pb1x00/pb1000.h>
+#include <asm/reboot.h>
 #include <prom.h>
 
 #include "../platform.h"
@@ -38,8 +40,16 @@
 	return "Alchemy Pb1000";
 }
 
-void board_reset(void)
+static void board_reset(char *c)
 {
+	asm volatile ("jr %0" : : "r" (0xbfc00000));
+}
+
+static void board_power_off(void)
+{
+	printk(KERN_ALERT "It's now safe to remove power\n");
+	while (1)
+		asm volatile (".set mips3 ; wait ; .set mips1");
 }
 
 void __init board_setup(void)
@@ -177,6 +187,10 @@
 		au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
 		break;
 	}
+
+	pm_power_off = board_power_off;
+	_machine_halt = board_power_off;
+	_machine_restart = board_reset;
 }
 
 static int __init pb1000_init_irq(void)
diff --git a/arch/mips/alchemy/devboards/pb1100/board_setup.c b/arch/mips/alchemy/devboards/pb1100/board_setup.c
index c7b4caa..90dda5f 100644
--- a/arch/mips/alchemy/devboards/pb1100/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1100/board_setup.c
@@ -39,11 +39,6 @@
 	return "Alchemy Pb1100";
 }
 
-void board_reset(void)
-{
-	bcsr_write(BCSR_SYSTEM, 0);
-}
-
 void __init board_setup(void)
 {
 	volatile void __iomem *base = (volatile void __iomem *)0xac000000UL;
diff --git a/arch/mips/alchemy/devboards/pb1200/board_setup.c b/arch/mips/alchemy/devboards/pb1200/board_setup.c
index 3184063f..8b4466f 100644
--- a/arch/mips/alchemy/devboards/pb1200/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1200/board_setup.c
@@ -48,12 +48,6 @@
 	return "Alchemy Pb1200";
 }
 
-void board_reset(void)
-{
-	bcsr_write(BCSR_RESETS, 0);
-	bcsr_write(BCSR_SYSTEM, 0);
-}
-
 void __init board_setup(void)
 {
 	printk(KERN_INFO "AMD Alchemy Pb1200 Board\n");
diff --git a/arch/mips/alchemy/devboards/pb1500/board_setup.c b/arch/mips/alchemy/devboards/pb1500/board_setup.c
index fa9770a..9cd9dfa 100644
--- a/arch/mips/alchemy/devboards/pb1500/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1500/board_setup.c
@@ -45,11 +45,6 @@
 	return "Alchemy Pb1500";
 }
 
-void board_reset(void)
-{
-	bcsr_write(BCSR_SYSTEM, 0);
-}
-
 void __init board_setup(void)
 {
 	u32 pin_func;
diff --git a/arch/mips/alchemy/devboards/pb1550/board_setup.c b/arch/mips/alchemy/devboards/pb1550/board_setup.c
index 1e8fb3d..9d7d6ed 100644
--- a/arch/mips/alchemy/devboards/pb1550/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1550/board_setup.c
@@ -48,11 +48,6 @@
 	return "Alchemy Pb1550";
 }
 
-void board_reset(void)
-{
-	bcsr_write(BCSR_SYSTEM, 0);
-}
-
 void __init board_setup(void)
 {
 	u32 pin_func;
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 2fafc78..566f2d7 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -576,7 +576,6 @@
 {
 	void __iomem *bootcr;
 	u32 val;
-	u16 chip_id;
 	int res;
 
 	res = ar7_register_uarts();
@@ -635,18 +634,10 @@
 	val = readl(bootcr);
 	iounmap(bootcr);
 	if (val & AR7_WDT_HW_ENA) {
-		chip_id = ar7_chip_id();
-		switch (chip_id) {
-		case AR7_CHIP_7100:
-		case AR7_CHIP_7200:
-			ar7_wdt_res.start = AR7_REGS_WDT;
-			break;
-		case AR7_CHIP_7300:
+		if (ar7_has_high_vlynq())
 			ar7_wdt_res.start = UR8_REGS_WDT;
-			break;
-		default:
-			break;
-		}
+		else
+			ar7_wdt_res.start = AR7_REGS_WDT;
 
 		ar7_wdt_res.end = ar7_wdt_res.start + 0x20;
 		res = platform_device_register(&ar7_wdt);
@@ -656,4 +647,4 @@
 
 	return 0;
 }
-arch_initcall(ar7_register_devices);
+device_initcall(ar7_register_devices);
diff --git a/arch/mips/bcm63xx/gpio.c b/arch/mips/bcm63xx/gpio.c
index 315bc7f..f560fe7 100644
--- a/arch/mips/bcm63xx/gpio.c
+++ b/arch/mips/bcm63xx/gpio.c
@@ -91,7 +91,7 @@
 
 	spin_lock_irqsave(&bcm63xx_gpio_lock, flags);
 	tmp = bcm_gpio_readl(reg);
-	if (dir == GPIO_DIR_IN)
+	if (dir == BCM63XX_GPIO_DIR_IN)
 		tmp &= ~mask;
 	else
 		tmp |= mask;
@@ -103,14 +103,14 @@
 
 static int bcm63xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
 {
-	return bcm63xx_gpio_set_direction(chip, gpio, GPIO_DIR_IN);
+	return bcm63xx_gpio_set_direction(chip, gpio, BCM63XX_GPIO_DIR_IN);
 }
 
 static int bcm63xx_gpio_direction_output(struct gpio_chip *chip,
 					 unsigned gpio, int value)
 {
 	bcm63xx_gpio_set(chip, gpio, value);
-	return bcm63xx_gpio_set_direction(chip, gpio, GPIO_DIR_OUT);
+	return bcm63xx_gpio_set_direction(chip, gpio, BCM63XX_GPIO_DIR_OUT);
 }
 
 
diff --git a/arch/mips/cavium-octeon/serial.c b/arch/mips/cavium-octeon/serial.c
index 8240728..83eac37 100644
--- a/arch/mips/cavium-octeon/serial.c
+++ b/arch/mips/cavium-octeon/serial.c
@@ -65,7 +65,11 @@
 	p->type = PORT_OCTEON;
 	p->iotype = UPIO_MEM;
 	p->regshift = 3;	/* I/O addresses are every 8 bytes */
-	p->uartclk = mips_hpt_frequency;
+	if (octeon_is_simulation())
+		/* Make simulator output fast */
+		p->uartclk = 115200 * 16;
+	else
+		p->uartclk = mips_hpt_frequency;
 	p->serial_in = octeon_serial_in;
 	p->serial_out = octeon_serial_out;
 }
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 9a06fa9..d1b5ffa 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -403,7 +403,6 @@
 	const int coreid = cvmx_get_core_num();
 	int i;
 	int argc;
-	struct uart_port octeon_port;
 #ifdef CONFIG_CAVIUM_RESERVE32
 	int64_t addr = -1;
 #endif
@@ -610,30 +609,6 @@
 	_machine_restart = octeon_restart;
 	_machine_halt = octeon_halt;
 
-	memset(&octeon_port, 0, sizeof(octeon_port));
-	/*
-	 * For early_serial_setup we don't set the port type or
-	 * UPF_FIXED_TYPE.
-	 */
-	octeon_port.flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ;
-	octeon_port.iotype = UPIO_MEM;
-	/* I/O addresses are every 8 bytes */
-	octeon_port.regshift = 3;
-	/* Clock rate of the chip */
-	octeon_port.uartclk = mips_hpt_frequency;
-	octeon_port.fifosize = 64;
-	octeon_port.mapbase = 0x0001180000000800ull + (1024 * octeon_uart);
-	octeon_port.membase = cvmx_phys_to_ptr(octeon_port.mapbase);
-	octeon_port.serial_in = octeon_serial_in;
-	octeon_port.serial_out = octeon_serial_out;
-#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
-	octeon_port.line = 0;
-#else
-	octeon_port.line = octeon_uart;
-#endif
-	octeon_port.irq = 42 + octeon_uart;
-	early_serial_setup(&octeon_port);
-
 	octeon_user_io_init();
 	register_smp_ops(&octeon_smp_ops);
 }
@@ -727,7 +702,7 @@
 	} while ((lsrval & 0x20) == 0);
 
 	/* Write the byte */
-	cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c);
+	cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
 	return 1;
 }
 
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index 5a5b6ba..e700095 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Wed Jun 24 14:08:59 2009
+# Linux kernel version: 2.6.34-rc6
+# Sat May  1 11:35:01 2010
 #
 CONFIG_MIPS=y
 
@@ -11,11 +11,12 @@
 # CONFIG_MACH_ALCHEMY is not set
 CONFIG_AR7=y
 # CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
 # CONFIG_MIPS_COBALT is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_LASAT is not set
-# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_MACH_LOONGSON is not set
 # CONFIG_MIPS_MALTA is not set
 # CONFIG_MIPS_SIM is not set
 # CONFIG_NEC_MARKEINS is not set
@@ -26,6 +27,7 @@
 # CONFIG_PNX8550_STB810 is not set
 # CONFIG_PMC_MSP is not set
 # CONFIG_PMC_YOSEMITE is not set
+# CONFIG_POWERTV is not set
 # CONFIG_SGI_IP22 is not set
 # CONFIG_SGI_IP27 is not set
 # CONFIG_SGI_IP28 is not set
@@ -46,6 +48,7 @@
 # CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
 # CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
 # CONFIG_ALCHEMY_GPIO_INDIRECT is not set
+CONFIG_LOONGSON_UART_BASE=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
@@ -63,10 +66,8 @@
 CONFIG_CSRC_R4K_LIB=y
 CONFIG_CSRC_R4K=y
 CONFIG_DMA_NONCOHERENT=y
-CONFIG_DMA_NEED_PCI_MAP_STATE=y
-CONFIG_EARLY_PRINTK=y
+CONFIG_NEED_DMA_MAP_STATE=y
 CONFIG_SYS_HAS_EARLY_PRINTK=y
-# CONFIG_HOTPLUG_CPU is not set
 # CONFIG_NO_IOPORT is not set
 CONFIG_GENERIC_GPIO=y
 # CONFIG_CPU_BIG_ENDIAN is not set
@@ -81,7 +82,8 @@
 #
 # CPU selection
 #
-# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_LOONGSON2E is not set
+# CONFIG_CPU_LOONGSON2F is not set
 CONFIG_CPU_MIPS32_R1=y
 # CONFIG_CPU_MIPS32_R2 is not set
 # CONFIG_CPU_MIPS64_R1 is not set
@@ -103,6 +105,8 @@
 # CONFIG_CPU_RM9000 is not set
 # CONFIG_CPU_SB1 is not set
 # CONFIG_CPU_CAVIUM_OCTEON is not set
+CONFIG_SYS_SUPPORTS_ZBOOT=y
+CONFIG_SYS_SUPPORTS_ZBOOT_UART16550=y
 CONFIG_SYS_HAS_CPU_MIPS32_R1=y
 CONFIG_CPU_MIPS32=y
 CONFIG_CPU_MIPSR1=y
@@ -124,6 +128,7 @@
 CONFIG_MIPS_MT_DISABLED=y
 # CONFIG_MIPS_MT_SMP is not set
 # CONFIG_MIPS_MT_SMTC is not set
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
 CONFIG_CPU_HAS_SYNC=y
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_GENERIC_IRQ_PROBE=y
@@ -141,8 +146,7 @@
 # CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=0
 CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
 CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
 CONFIG_TICK_ONESHOT=y
 # CONFIG_NO_HZ is not set
@@ -165,6 +169,7 @@
 CONFIG_LOCKDEP_SUPPORT=y
 CONFIG_STACKTRACE_SUPPORT=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
 
 #
 # General setup
@@ -174,6 +179,14 @@
 CONFIG_INIT_ENV_ARG_LIMIT=32
 CONFIG_LOCALVERSION=""
 # CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+CONFIG_KERNEL_LZMA=y
+# CONFIG_KERNEL_LZO is not set
 CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
 CONFIG_SYSVIPC_SYSCTL=y
@@ -186,14 +199,12 @@
 #
 # RCU Subsystem
 #
-CONFIG_CLASSIC_RCU=y
 # CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
+CONFIG_TINY_RCU=y
 # CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
 # CONFIG_CGROUPS is not set
 CONFIG_SYSFS_DEPRECATED=y
 CONFIG_SYSFS_DEPRECATED_V2=y
@@ -204,6 +215,7 @@
 CONFIG_RD_GZIP=y
 # CONFIG_RD_BZIP2 is not set
 CONFIG_RD_LZMA=y
+# CONFIG_RD_LZO is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
 CONFIG_ANON_INODES=y
@@ -225,19 +237,22 @@
 CONFIG_AIO=y
 
 #
-# Performance Counters
+# Kernel Performance Events And Counters
 #
 # CONFIG_VM_EVENT_COUNTERS is not set
-CONFIG_STRIP_ASM_SYMS=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
 # CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
 CONFIG_HAVE_OPROFILE=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
 # CONFIG_SLOW_WORK is not set
-# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 CONFIG_BASE_SMALL=0
@@ -248,7 +263,7 @@
 # CONFIG_MODVERSIONS is not set
 # CONFIG_MODULE_SRCVERSION_ALL is not set
 CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+# CONFIG_LBDAF is not set
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_BLK_DEV_INTEGRITY is not set
 
@@ -256,14 +271,41 @@
 # IO Schedulers
 #
 CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_AS is not set
 CONFIG_IOSCHED_DEADLINE=y
 # CONFIG_IOSCHED_CFQ is not set
-# CONFIG_DEFAULT_AS is not set
 CONFIG_DEFAULT_DEADLINE=y
 # CONFIG_DEFAULT_CFQ is not set
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="deadline"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
 # CONFIG_FREEZER is not set
 
 #
@@ -293,7 +335,6 @@
 # Networking options
 #
 CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
 CONFIG_UNIX=y
 # CONFIG_NET_KEY is not set
 CONFIG_INET=y
@@ -377,6 +418,7 @@
 CONFIG_NETFILTER_XTABLES=m
 # CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
 # CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
 # CONFIG_NETFILTER_XT_TARGET_DSCP is not set
 # CONFIG_NETFILTER_XT_TARGET_HL is not set
 # CONFIG_NETFILTER_XT_TARGET_LED is not set
@@ -458,6 +500,7 @@
 # CONFIG_IP_NF_ARPTABLES is not set
 # CONFIG_IP_DCCP is not set
 # CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
 # CONFIG_TIPC is not set
 CONFIG_ATM=m
 # CONFIG_ATM_CLIP is not set
@@ -466,6 +509,7 @@
 CONFIG_ATM_BR2684_IPFILTER=y
 CONFIG_STP=y
 CONFIG_BRIDGE=y
+CONFIG_BRIDGE_IGMP_SNOOPING=y
 # CONFIG_NET_DSA is not set
 CONFIG_VLAN_8021Q=y
 # CONFIG_VLAN_8021Q_GVRP is not set
@@ -541,20 +585,19 @@
 # CONFIG_AF_RXRPC is not set
 CONFIG_FIB_RULES=y
 CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
 CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
 # CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
 # CONFIG_CFG80211_DEBUGFS is not set
-# CONFIG_WIRELESS_OLD_REGULATORY is not set
-CONFIG_WIRELESS_EXT=y
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
 CONFIG_WIRELESS_EXT_SYSFS=y
 # CONFIG_LIB80211 is not set
 CONFIG_MAC80211=m
-CONFIG_MAC80211_DEFAULT_PS=y
-CONFIG_MAC80211_DEFAULT_PS_VALUE=1
-
-#
-# Rate control algorithm selection
-#
 CONFIG_MAC80211_RC_PID=y
 CONFIG_MAC80211_RC_MINSTREL=y
 CONFIG_MAC80211_RC_DEFAULT_PID=y
@@ -576,6 +619,7 @@
 # Generic Driver Options
 #
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 CONFIG_FW_LOADER=y
@@ -585,9 +629,9 @@
 # CONFIG_CONNECTOR is not set
 CONFIG_MTD=y
 # CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
 # CONFIG_MTD_CONCAT is not set
 CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
 # CONFIG_MTD_REDBOOT_PARTS is not set
 # CONFIG_MTD_CMDLINE_PARTS is not set
 # CONFIG_MTD_AR7_PARTS is not set
@@ -636,6 +680,7 @@
 CONFIG_MTD_COMPLEX_MAPPINGS=y
 CONFIG_MTD_PHYSMAP=y
 # CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_GPIO_ADDR is not set
 # CONFIG_MTD_PLATRAM is not set
 
 #
@@ -668,6 +713,10 @@
 CONFIG_BLK_DEV=y
 # CONFIG_BLK_DEV_COW_COMMON is not set
 # CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
 # CONFIG_BLK_DEV_NBD is not set
 # CONFIG_BLK_DEV_RAM is not set
 # CONFIG_CDROM_PKTCDVD is not set
@@ -687,6 +736,7 @@
 #
 # SCSI device support
 #
+CONFIG_SCSI_MOD=y
 # CONFIG_RAID_ATTRS is not set
 # CONFIG_SCSI is not set
 # CONFIG_SCSI_DMA is not set
@@ -727,6 +777,7 @@
 # CONFIG_SMC91X is not set
 # CONFIG_DM9000 is not set
 # CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
 # CONFIG_DNET is not set
 # CONFIG_IBM_NEW_EMAC_ZMII is not set
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
@@ -737,23 +788,21 @@
 # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 # CONFIG_B44 is not set
 # CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
 CONFIG_CPMAC=y
 # CONFIG_NETDEV_1000 is not set
 # CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-CONFIG_WLAN_80211=y
-# CONFIG_LIBERTAS is not set
+CONFIG_WLAN=y
 # CONFIG_LIBERTAS_THINFIRM is not set
 # CONFIG_MAC80211_HWSIM is not set
-# CONFIG_P54_COMMON is not set
-# CONFIG_HOSTAP is not set
+# CONFIG_ATH_COMMON is not set
 # CONFIG_B43 is not set
 # CONFIG_B43LEGACY is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_P54_COMMON is not set
 # CONFIG_RT2X00 is not set
+# CONFIG_WL12XX is not set
 
 #
 # Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -813,6 +862,7 @@
 #
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
 CONFIG_UNIX98_PTYS=y
 # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 # CONFIG_LEGACY_PTYS is not set
@@ -824,11 +874,39 @@
 # CONFIG_TCG_TPM is not set
 # CONFIG_I2C is not set
 # CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+# CONFIG_GPIO_IT8761E is not set
+
+#
+# I2C GPIO expanders:
+#
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+
+#
+# AC97 GPIO expanders:
+#
 # CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
 # CONFIG_HWMON is not set
 # CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
 CONFIG_WATCHDOG=y
 # CONFIG_WATCHDOG_NOWAYOUT is not set
 
@@ -842,13 +920,7 @@
 #
 # Sonics Silicon Backplane
 #
-CONFIG_SSB=y
-# CONFIG_SSB_SILENT is not set
-# CONFIG_SSB_DEBUG is not set
-CONFIG_SSB_SERIAL=y
-CONFIG_SSB_DRIVER_MIPS=y
-CONFIG_SSB_EMBEDDED=y
-CONFIG_SSB_DRIVER_EXTIF=y
+# CONFIG_SSB is not set
 
 #
 # Multifunction device drivers
@@ -882,15 +954,18 @@
 #
 # LED drivers
 #
-# CONFIG_LEDS_GPIO is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
 
 #
 # LED Triggers
 #
-CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 # CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
 
 #
@@ -921,6 +996,7 @@
 # CONFIG_XFS_FS is not set
 # CONFIG_OCFS2_FS is not set
 # CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
 CONFIG_FILE_LOCKING=y
 CONFIG_FSNOTIFY=y
 # CONFIG_DNOTIFY is not set
@@ -984,6 +1060,7 @@
 CONFIG_JFFS2_CMODE_PRIORITY=y
 # CONFIG_JFFS2_CMODE_SIZE is not set
 # CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_LOGFS is not set
 # CONFIG_CRAMFS is not set
 CONFIG_SQUASHFS=y
 # CONFIG_SQUASHFS_EMBEDDED is not set
@@ -996,11 +1073,11 @@
 # CONFIG_ROMFS_FS is not set
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
 CONFIG_NETWORK_FILESYSTEMS=y
 # CONFIG_NFS_FS is not set
 # CONFIG_NFSD is not set
 # CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
 # CONFIG_CIFS is not set
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
@@ -1039,21 +1116,29 @@
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FRAME_WARN=1024
 # CONFIG_MAGIC_SYSRQ is not set
+CONFIG_STRIP_ASM_SYMS=y
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
 # CONFIG_DEBUG_MEMORY_INIT is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
 CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
 CONFIG_TRACING_SUPPORT=y
 # CONFIG_FTRACE is not set
 # CONFIG_DYNAMIC_DEBUG is not set
 # CONFIG_SAMPLES is not set
 CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_EARLY_PRINTK=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="rootfstype=squashfs,jffs2"
 # CONFIG_CMDLINE_OVERRIDE is not set
+# CONFIG_SPINLOCK_TEST is not set
 
 #
 # Security options
@@ -1061,13 +1146,16 @@
 # CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
 # CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
 CONFIG_CRYPTO=y
 
 #
 # Crypto core or helper
 #
-# CONFIG_CRYPTO_FIPS is not set
 CONFIG_CRYPTO_ALGAPI=m
 CONFIG_CRYPTO_ALGAPI2=m
 CONFIG_CRYPTO_AEAD2=m
@@ -1108,11 +1196,13 @@
 #
 # CONFIG_CRYPTO_HMAC is not set
 # CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
 
 #
 # Digest
 #
 # CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
 # CONFIG_CRYPTO_MD4 is not set
 # CONFIG_CRYPTO_MD5 is not set
 # CONFIG_CRYPTO_MICHAEL_MIC is not set
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index 267bd46..bbd826b 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25-rc2
-# Mon Feb 18 11:55:24 2008
+# Linux kernel version: 2.6.34-rc6
+# Sat May  1 12:14:30 2010
 #
 CONFIG_MIPS=y
 
@@ -9,20 +9,25 @@
 # Machine selection
 #
 # CONFIG_MACH_ALCHEMY is not set
+# CONFIG_AR7 is not set
 CONFIG_BCM47XX=y
+# CONFIG_BCM63XX is not set
 # CONFIG_MIPS_COBALT is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_LASAT is not set
-# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_MACH_LOONGSON is not set
 # CONFIG_MIPS_MALTA is not set
 # CONFIG_MIPS_SIM is not set
-# CONFIG_MARKEINS is not set
+# CONFIG_NEC_MARKEINS is not set
 # CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
 # CONFIG_PNX8550_JBS is not set
 # CONFIG_PNX8550_STB810 is not set
 # CONFIG_PMC_MSP is not set
 # CONFIG_PMC_YOSEMITE is not set
+# CONFIG_POWERTV is not set
 # CONFIG_SGI_IP22 is not set
 # CONFIG_SGI_IP27 is not set
 # CONFIG_SGI_IP28 is not set
@@ -36,10 +41,14 @@
 # CONFIG_SIBYTE_SENTOSA is not set
 # CONFIG_SIBYTE_BIGSUR is not set
 # CONFIG_SNI_RM is not set
-# CONFIG_TOSHIBA_JMR3927 is not set
-# CONFIG_TOSHIBA_RBTX4927 is not set
-# CONFIG_TOSHIBA_RBTX4938 is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
 # CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
+CONFIG_LOONGSON_UART_BASE=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
@@ -50,16 +59,16 @@
 CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CMOS_UPDATE=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
-# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_CEVT_R4K_LIB=y
 CONFIG_CEVT_R4K=y
+CONFIG_CSRC_R4K_LIB=y
 CONFIG_CSRC_R4K=y
 CONFIG_CFE=y
 CONFIG_DMA_NONCOHERENT=y
-CONFIG_DMA_NEED_PCI_MAP_STATE=y
-CONFIG_EARLY_PRINTK=y
+CONFIG_NEED_DMA_MAP_STATE=y
 CONFIG_SYS_HAS_EARLY_PRINTK=y
-# CONFIG_HOTPLUG_CPU is not set
 # CONFIG_NO_IOPORT is not set
 CONFIG_GENERIC_GPIO=y
 # CONFIG_CPU_BIG_ENDIAN is not set
@@ -71,7 +80,8 @@
 #
 # CPU selection
 #
-# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_LOONGSON2E is not set
+# CONFIG_CPU_LOONGSON2F is not set
 CONFIG_CPU_MIPS32_R1=y
 # CONFIG_CPU_MIPS32_R2 is not set
 # CONFIG_CPU_MIPS64_R1 is not set
@@ -84,6 +94,7 @@
 # CONFIG_CPU_TX49XX is not set
 # CONFIG_CPU_R5000 is not set
 # CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
 # CONFIG_CPU_R6000 is not set
 # CONFIG_CPU_NEVADA is not set
 # CONFIG_CPU_R8000 is not set
@@ -91,11 +102,13 @@
 # CONFIG_CPU_RM7000 is not set
 # CONFIG_CPU_RM9000 is not set
 # CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
 CONFIG_SYS_HAS_CPU_MIPS32_R1=y
 CONFIG_CPU_MIPS32=y
 CONFIG_CPU_MIPSR1=y
 CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
 CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_HARDWARE_WATCHPOINTS=y
 
 #
 # Kernel type
@@ -105,11 +118,13 @@
 CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_8KB is not set
 # CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
 # CONFIG_PAGE_SIZE_64KB is not set
 CONFIG_CPU_HAS_PREFETCH=y
 CONFIG_MIPS_MT_DISABLED=y
 # CONFIG_MIPS_MT_SMP is not set
 # CONFIG_MIPS_MT_SMTC is not set
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
 CONFIG_CPU_HAS_SYNC=y
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_GENERIC_IRQ_PROBE=y
@@ -122,12 +137,13 @@
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=0
 CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
 CONFIG_TICK_ONESHOT=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -144,12 +160,12 @@
 CONFIG_PREEMPT_NONE=y
 # CONFIG_PREEMPT_VOLUNTARY is not set
 # CONFIG_PREEMPT is not set
-CONFIG_RCU_TRACE=y
 CONFIG_KEXEC=y
 # CONFIG_SECCOMP is not set
 CONFIG_LOCKDEP_SUPPORT=y
 CONFIG_STACKTRACE_SUPPORT=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
 
 #
 # General setup
@@ -163,6 +179,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_SYSVIPC_SYSCTL=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -170,25 +187,37 @@
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_AUDIT=y
+
+#
+# RCU Subsystem
+#
+# CONFIG_TREE_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
+CONFIG_TINY_RCU=y
+# CONFIG_TREE_RCU_TRACE is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=17
 CONFIG_CGROUPS=y
 # CONFIG_CGROUP_DEBUG is not set
 CONFIG_CGROUP_NS=y
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUP_FREEZER is not set
+# CONFIG_CGROUP_DEVICE is not set
+# CONFIG_CPUSETS is not set
 CONFIG_CGROUP_CPUACCT=y
 # CONFIG_RESOURCE_COUNTERS is not set
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 CONFIG_RELAY=y
 # CONFIG_NAMESPACES is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+CONFIG_RD_LZMA=y
+# CONFIG_RD_LZO is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
 CONFIG_EMBEDDED=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
@@ -197,54 +226,90 @@
 CONFIG_PRINTK=y
 CONFIG_BUG=y
 CONFIG_ELF_CORE=y
-CONFIG_COMPAT_BRK=y
+CONFIG_PCSPKR_PLATFORM=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
 CONFIG_SIGNALFD=y
 CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
 CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_COMPAT_BRK=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
 # CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
 CONFIG_HAVE_OPROFILE=y
-# CONFIG_HAVE_KPROBES is not set
-CONFIG_PROC_PAGE_MONITOR=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_SLOW_WORK_DEBUG is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 # CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
 CONFIG_BLOCK=y
-CONFIG_LBD=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_LSF=y
+CONFIG_LBDAF=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
 #
 CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
 CONFIG_IOSCHED_DEADLINE=y
 CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
+# CONFIG_CFQ_GROUP_IOSCHED is not set
 # CONFIG_DEFAULT_DEADLINE is not set
 CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
-# CONFIG_PREEMPT_RCU is not set
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+# CONFIG_FREEZER is not set
 
 #
 # Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -253,7 +318,8 @@
 CONFIG_PCI=y
 CONFIG_PCI_DOMAINS=y
 # CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
 CONFIG_MMU=y
 # CONFIG_PCCARD is not set
 # CONFIG_HOTPLUG_PCI is not set
@@ -262,31 +328,30 @@
 # Executable file formats
 #
 CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_TRAD_SIGNALS=y
 
 #
 # Power management options
 #
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
 CONFIG_ARCH_SUSPEND_POSSIBLE=y
 # CONFIG_PM is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
 # Networking options
 #
 CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
 CONFIG_UNIX=y
 CONFIG_XFRM=y
 CONFIG_XFRM_USER=m
 # CONFIG_XFRM_SUB_POLICY is not set
 # CONFIG_XFRM_MIGRATE is not set
 # CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=m
 CONFIG_NET_KEY=m
 # CONFIG_NET_KEY_MIGRATE is not set
 CONFIG_INET=y
@@ -315,7 +380,7 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_LRO=m
+CONFIG_INET_LRO=y
 CONFIG_INET_DIAG=m
 CONFIG_INET_TCP_DIAG=m
 CONFIG_TCP_CONG_ADVANCED=y
@@ -339,7 +404,112 @@
 # CONFIG_DEFAULT_RENO is not set
 CONFIG_DEFAULT_TCP_CONG="bic"
 # CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+# CONFIG_IPV6_MIP6 is not set
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+# CONFIG_IPV6_MROUTE is not set
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_QUEUE=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CT_ACCT=y
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_GRE=m
+CONFIG_NF_CT_PROTO_SCTP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+# CONFIG_NETFILTER_TPROXY is not set
+CONFIG_NETFILTER_XTABLES=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_HL=m
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
 CONFIG_IP_VS=m
+# CONFIG_IP_VS_IPV6 is not set
 # CONFIG_IP_VS_DEBUG is not set
 CONFIG_IP_VS_TAB_BITS=12
 
@@ -348,8 +518,10 @@
 #
 CONFIG_IP_VS_PROTO_TCP=y
 CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
 CONFIG_IP_VS_PROTO_ESP=y
 CONFIG_IP_VS_PROTO_AH=y
+# CONFIG_IP_VS_PROTO_SCTP is not set
 
 #
 # IPVS scheduler
@@ -369,111 +541,19 @@
 # IPVS application helper
 #
 CONFIG_IP_VS_FTP=m
-CONFIG_IPV6=m
-CONFIG_IPV6_PRIVACY=y
-# CONFIG_IPV6_ROUTER_PREF is not set
-# CONFIG_IPV6_OPTIMISTIC_DAD is not set
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-# CONFIG_IPV6_MIP6 is not set
-CONFIG_INET6_XFRM_TUNNEL=m
-CONFIG_INET6_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_SIT=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IPV6_SUBTREES=y
-CONFIG_NETWORK_SECMARK=y
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-CONFIG_NETFILTER_ADVANCED=y
-CONFIG_BRIDGE_NETFILTER=y
-
-#
-# Core Netfilter Configuration
-#
-CONFIG_NETFILTER_NETLINK=m
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NETFILTER_NETLINK_LOG=m
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CT_ACCT=y
-CONFIG_NF_CONNTRACK_MARK=y
-CONFIG_NF_CONNTRACK_SECMARK=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_GRE=m
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
-CONFIG_NF_CONNTRACK_AMANDA=m
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_H323=m
-CONFIG_NF_CONNTRACK_IRC=m
-CONFIG_NF_CONNTRACK_NETBIOS_NS=m
-CONFIG_NF_CONNTRACK_PPTP=m
-CONFIG_NF_CONNTRACK_SANE=m
-CONFIG_NF_CONNTRACK_SIP=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_XTABLES=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
-CONFIG_NETFILTER_XT_TARGET_DSCP=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NFLOG=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
-# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
-CONFIG_NETFILTER_XT_TARGET_TRACE=m
-CONFIG_NETFILTER_XT_TARGET_SECMARK=m
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
-CONFIG_NETFILTER_XT_MATCH_DSCP=m
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-CONFIG_NETFILTER_XT_MATCH_HELPER=m
-# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_QUOTA=m
-# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_SCTP=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_TIME=m
-CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 
 #
 # IP: Netfilter Configuration
 #
+CONFIG_NF_DEFRAG_IPV4=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NF_CONNTRACK_PROC_COMPAT=y
 CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_AH=m
-CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_LOG=m
@@ -481,10 +561,13 @@
 CONFIG_NF_NAT=m
 CONFIG_NF_NAT_NEEDED=y
 CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
 CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
 CONFIG_NF_NAT_SNMP_BASIC=m
+CONFIG_NF_NAT_PROTO_DCCP=m
 CONFIG_NF_NAT_PROTO_GRE=m
+CONFIG_NF_NAT_PROTO_UDPLITE=m
+CONFIG_NF_NAT_PROTO_SCTP=m
 CONFIG_NF_NAT_FTP=m
 CONFIG_NF_NAT_IRC=m
 CONFIG_NF_NAT_TFTP=m
@@ -493,9 +576,9 @@
 CONFIG_NF_NAT_H323=m
 CONFIG_NF_NAT_SIP=m
 CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
 CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_RAW=m
 CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
@@ -507,24 +590,20 @@
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
 CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_MH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
 CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_TARGET_HL=m
 CONFIG_IP6_NF_RAW=m
-
-#
-# Bridge: Netfilter Configuration
-#
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -533,6 +612,7 @@
 CONFIG_BRIDGE_EBT_AMONG=m
 CONFIG_BRIDGE_EBT_ARP=m
 CONFIG_BRIDGE_EBT_IP=m
+# CONFIG_BRIDGE_EBT_IP6 is not set
 CONFIG_BRIDGE_EBT_LIMIT=m
 CONFIG_BRIDGE_EBT_MARK=m
 CONFIG_BRIDGE_EBT_PKTTYPE=m
@@ -545,31 +625,30 @@
 CONFIG_BRIDGE_EBT_SNAT=m
 CONFIG_BRIDGE_EBT_LOG=m
 CONFIG_BRIDGE_EBT_ULOG=m
+# CONFIG_BRIDGE_EBT_NFLOG is not set
 CONFIG_IP_DCCP=m
 CONFIG_INET_DCCP_DIAG=m
-CONFIG_IP_DCCP_ACKVEC=y
 
 #
 # DCCP CCIDs Configuration (EXPERIMENTAL)
 #
-CONFIG_IP_DCCP_CCID2=m
 # CONFIG_IP_DCCP_CCID2_DEBUG is not set
-CONFIG_IP_DCCP_CCID3=m
+CONFIG_IP_DCCP_CCID3=y
 # CONFIG_IP_DCCP_CCID3_DEBUG is not set
 CONFIG_IP_DCCP_CCID3_RTO=100
-CONFIG_IP_DCCP_TFRC_LIB=m
+CONFIG_IP_DCCP_TFRC_LIB=y
 CONFIG_IP_SCTP=m
 # CONFIG_SCTP_DBG_MSG is not set
 # CONFIG_SCTP_DBG_OBJCNT is not set
 # CONFIG_SCTP_HMAC_NONE is not set
 # CONFIG_SCTP_HMAC_SHA1 is not set
 CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_RDS is not set
 CONFIG_TIPC=m
 CONFIG_TIPC_ADVANCED=y
 CONFIG_TIPC_ZONES=3
 CONFIG_TIPC_CLUSTERS=1
 CONFIG_TIPC_NODES=255
-CONFIG_TIPC_SLAVE_NODES=0
 CONFIG_TIPC_PORTS=8191
 CONFIG_TIPC_LOG=0
 # CONFIG_TIPC_DEBUG is not set
@@ -580,8 +659,12 @@
 CONFIG_ATM_MPOA=m
 CONFIG_ATM_BR2684=m
 # CONFIG_ATM_BR2684_IPFILTER is not set
+CONFIG_STP=m
 CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+# CONFIG_NET_DSA is not set
 CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
 # CONFIG_DECNET is not set
 CONFIG_LLC=m
 # CONFIG_LLC2 is not set
@@ -591,6 +674,8 @@
 # CONFIG_LAPB is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
 CONFIG_NET_SCHED=y
 
 #
@@ -601,7 +686,7 @@
 CONFIG_NET_SCH_HFSC=m
 CONFIG_NET_SCH_ATM=m
 CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_RR=m
+# CONFIG_NET_SCH_MULTIQ is not set
 CONFIG_NET_SCH_RED=m
 CONFIG_NET_SCH_SFQ=m
 CONFIG_NET_SCH_TEQL=m
@@ -609,6 +694,7 @@
 CONFIG_NET_SCH_GRED=m
 CONFIG_NET_SCH_DSMARK=m
 CONFIG_NET_SCH_NETEM=m
+# CONFIG_NET_SCH_DRR is not set
 CONFIG_NET_SCH_INGRESS=m
 
 #
@@ -626,6 +712,7 @@
 CONFIG_NET_CLS_RSVP=m
 CONFIG_NET_CLS_RSVP6=m
 # CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_CLS_CGROUP is not set
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_STACK=32
 CONFIG_NET_EMATCH_CMP=m
@@ -642,8 +729,10 @@
 CONFIG_NET_ACT_NAT=m
 CONFIG_NET_ACT_PEDIT=m
 CONFIG_NET_ACT_SIMP=m
+# CONFIG_NET_ACT_SKBEDIT is not set
 CONFIG_NET_CLS_IND=y
 CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
 
 #
 # Network testing
@@ -651,58 +740,7 @@
 CONFIG_NET_PKTGEN=m
 # CONFIG_HAMRADIO is not set
 # CONFIG_CAN is not set
-CONFIG_IRDA=m
-
-#
-# IrDA protocols
-#
-CONFIG_IRLAN=m
-CONFIG_IRNET=m
-CONFIG_IRCOMM=m
-# CONFIG_IRDA_ULTRA is not set
-
-#
-# IrDA options
-#
-CONFIG_IRDA_CACHE_LAST_LSAP=y
-CONFIG_IRDA_FAST_RR=y
-# CONFIG_IRDA_DEBUG is not set
-
-#
-# Infrared-port device drivers
-#
-
-#
-# SIR device drivers
-#
-CONFIG_IRTTY_SIR=m
-
-#
-# Dongle support
-#
-CONFIG_DONGLE=y
-CONFIG_ESI_DONGLE=m
-CONFIG_ACTISYS_DONGLE=m
-CONFIG_TEKRAM_DONGLE=m
-CONFIG_TOIM3232_DONGLE=m
-CONFIG_LITELINK_DONGLE=m
-CONFIG_MA600_DONGLE=m
-CONFIG_GIRBIL_DONGLE=m
-CONFIG_MCP2120_DONGLE=m
-CONFIG_OLD_BELKIN_DONGLE=m
-CONFIG_ACT200L_DONGLE=m
-CONFIG_KINGSUN_DONGLE=m
-CONFIG_KSDAZZLE_DONGLE=m
-CONFIG_KS959_DONGLE=m
-
-#
-# FIR device drivers
-#
-CONFIG_USB_IRDA=m
-CONFIG_SIGMATEL_FIR=m
-CONFIG_TOSHIBA_FIR=m
-CONFIG_VLSI_FIR=m
-CONFIG_MCS_FIR=m
+# CONFIG_IRDA is not set
 CONFIG_BT=m
 # CONFIG_BT_L2CAP is not set
 # CONFIG_BT_SCO is not set
@@ -710,8 +748,7 @@
 #
 # Bluetooth device drivers
 #
-CONFIG_BT_HCIUSB=m
-CONFIG_BT_HCIUSB_SCO=y
+# CONFIG_BT_HCIBTUSB is not set
 CONFIG_BT_HCIUART=m
 CONFIG_BT_HCIUART_H4=y
 CONFIG_BT_HCIUART_BCSP=y
@@ -720,51 +757,37 @@
 CONFIG_BT_HCIBPA10X=m
 CONFIG_BT_HCIBFUSB=m
 CONFIG_BT_HCIVHCI=m
+# CONFIG_BT_MRVL is not set
 # CONFIG_AF_RXRPC is not set
 CONFIG_FIB_RULES=y
-
-#
-# Wireless
-#
+CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
 CONFIG_CFG80211=m
-CONFIG_NL80211=y
-CONFIG_WIRELESS_EXT=y
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
 CONFIG_MAC80211=m
-
-#
-# Rate control algorithm selection
-#
-CONFIG_MAC80211_RC_DEFAULT_PID=y
-# CONFIG_MAC80211_RC_DEFAULT_SIMPLE is not set
-# CONFIG_MAC80211_RC_DEFAULT_NONE is not set
-
-#
-# Selecting 'y' for an algorithm will
-#
-
-#
-# build the algorithm into mac80211.
-#
-CONFIG_MAC80211_RC_DEFAULT="pid"
 CONFIG_MAC80211_RC_PID=y
-# CONFIG_MAC80211_RC_SIMPLE is not set
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
+CONFIG_MAC80211_RC_DEFAULT="pid"
+CONFIG_MAC80211_MESH=y
 CONFIG_MAC80211_LEDS=y
 # CONFIG_MAC80211_DEBUGFS is not set
-# CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT is not set
-# CONFIG_MAC80211_DEBUG is not set
-CONFIG_IEEE80211=m
-# CONFIG_IEEE80211_DEBUG is not set
-CONFIG_IEEE80211_CRYPT_WEP=m
-CONFIG_IEEE80211_CRYPT_CCMP=m
-CONFIG_IEEE80211_CRYPT_TKIP=m
-CONFIG_IEEE80211_SOFTMAC=m
-# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
 CONFIG_RFKILL=m
-CONFIG_RFKILL_INPUT=m
 CONFIG_RFKILL_LEDS=y
-CONFIG_NET_9P=m
-CONFIG_NET_9P_FD=m
-# CONFIG_NET_9P_DEBUG is not set
+CONFIG_RFKILL_INPUT=y
+# CONFIG_NET_9P is not set
 
 #
 # Device Drivers
@@ -774,17 +797,22 @@
 # Generic Driver Options
 #
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 CONFIG_FW_LOADER=m
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
 # CONFIG_SYS_HYPERVISOR is not set
 CONFIG_CONNECTOR=m
 CONFIG_MTD=y
 # CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
 CONFIG_MTD_CONCAT=y
 CONFIG_MTD_PARTITIONS=y
 # CONFIG_MTD_REDBOOT_PARTS is not set
 # CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
 
 #
 # User Modules And Translation Layers
@@ -829,9 +857,7 @@
 #
 # CONFIG_MTD_COMPLEX_MAPPINGS is not set
 CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_START=0x8000000
-CONFIG_MTD_PHYSMAP_LEN=0x0
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
 # CONFIG_MTD_INTEL_VR_NOR is not set
 # CONFIG_MTD_PLATRAM is not set
 
@@ -854,6 +880,11 @@
 # CONFIG_MTD_ONENAND is not set
 
 #
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
 # UBI - Unsorted block images
 #
 # CONFIG_MTD_UBI is not set
@@ -866,6 +897,7 @@
 # CONFIG_BLK_DEV_COW_COMMON is not set
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_DRBD is not set
 CONFIG_BLK_DEV_NBD=m
 # CONFIG_BLK_DEV_SX8 is not set
 # CONFIG_BLK_DEV_UB is not set
@@ -875,18 +907,27 @@
 # CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
 CONFIG_ATA_OVER_ETH=m
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_PHANTOM is not set
-# CONFIG_EEPROM_93CX6 is not set
 # CONFIG_SGI_IOC4 is not set
 # CONFIG_TIFM_CORE is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
 CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 #
 # SCSI device support
 #
+CONFIG_SCSI_MOD=y
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_SCSI_DMA=y
@@ -904,10 +945,6 @@
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_CHR_DEV_SCH=m
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
 CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
@@ -924,21 +961,30 @@
 # CONFIG_SCSI_SRP_ATTRS is not set
 CONFIG_SCSI_LOWLEVEL=y
 CONFIG_ISCSI_TCP=m
+# CONFIG_SCSI_BNX2_ISCSI is not set
+# CONFIG_BE2ISCSI is not set
 # CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_HPSA is not set
 # CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
 # CONFIG_SCSI_ACARD is not set
 # CONFIG_SCSI_AACRAID is not set
 # CONFIG_SCSI_AIC7XXX is not set
 # CONFIG_SCSI_AIC7XXX_OLD is not set
 # CONFIG_SCSI_AIC79XX is not set
 # CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
 # CONFIG_SCSI_DPT_I2O is not set
 # CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_SCSI_ARCMSR is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_MPT2SAS is not set
 # CONFIG_SCSI_HPTIOP is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_FCOE is not set
 # CONFIG_SCSI_DMX3191D is not set
 # CONFIG_SCSI_FUTURE_DOMAIN is not set
 # CONFIG_SCSI_IPS is not set
@@ -954,7 +1000,12 @@
 # CONFIG_SCSI_DC390T is not set
 # CONFIG_SCSI_NSP32 is not set
 # CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
 # CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_BFA_FC is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
 # CONFIG_ATA is not set
 # CONFIG_MD is not set
 # CONFIG_FUSION is not set
@@ -962,11 +1013,18 @@
 #
 # IEEE 1394 (FireWire) support
 #
+
+#
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# The newer stack is recommended.
+#
 # CONFIG_FIREWIRE is not set
 # CONFIG_IEEE1394 is not set
 # CONFIG_I2O is not set
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 # CONFIG_IFB is not set
 CONFIG_DUMMY=m
 # CONFIG_BONDING is not set
@@ -990,8 +1048,11 @@
 CONFIG_BROADCOM_PHY=m
 CONFIG_ICPLUS_PHY=m
 # CONFIG_REALTEK_PHY is not set
-# CONFIG_FIXED_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
 CONFIG_MDIO_BITBANG=m
+# CONFIG_MDIO_GPIO is not set
 CONFIG_NET_ETHERNET=y
 CONFIG_MII=y
 # CONFIG_AX88796 is not set
@@ -999,24 +1060,31 @@
 # CONFIG_SUNGEM is not set
 # CONFIG_CASSINI is not set
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_SMC91X is not set
 # CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
 # CONFIG_NET_TULIP is not set
 # CONFIG_HP100 is not set
 # CONFIG_IBM_NEW_EMAC_ZMII is not set
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NET_PCI=y
 # CONFIG_PCNET32 is not set
 # CONFIG_AMD8111_ETH is not set
 # CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
 CONFIG_B44=y
 CONFIG_B44_PCI_AUTOSELECT=y
 CONFIG_B44_PCICORE_AUTOSELECT=y
 CONFIG_B44_PCI=y
 # CONFIG_FORCEDETH is not set
 # CONFIG_TC35815 is not set
-# CONFIG_EEPRO100 is not set
 # CONFIG_E100 is not set
 # CONFIG_FEALNX is not set
 # CONFIG_NATSEMI is not set
@@ -1026,41 +1094,67 @@
 # CONFIG_R6040 is not set
 # CONFIG_SIS900 is not set
 # CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
 # CONFIG_SUNDANCE is not set
 # CONFIG_TLAN is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
 # CONFIG_VIA_RHINE is not set
 # CONFIG_SC92031 is not set
+# CONFIG_ATL2 is not set
 # CONFIG_NETDEV_1000 is not set
 # CONFIG_NETDEV_10000 is not set
 # CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-CONFIG_WLAN_80211=y
-# CONFIG_IPW2100 is not set
-# CONFIG_IPW2200 is not set
-# CONFIG_LIBERTAS is not set
-# CONFIG_HERMES is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
 # CONFIG_ATMEL is not set
+# CONFIG_AT76C50X_USB is not set
 # CONFIG_PRISM54 is not set
 # CONFIG_USB_ZD1201 is not set
 # CONFIG_USB_NET_RNDIS_WLAN is not set
 # CONFIG_RTL8180 is not set
 # CONFIG_RTL8187 is not set
 # CONFIG_ADM8211 is not set
-# CONFIG_P54_COMMON is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_MWL8K is not set
+CONFIG_ATH_COMMON=m
+# CONFIG_ATH_DEBUG is not set
 CONFIG_ATH5K=m
-# CONFIG_IWL4965 is not set
-# CONFIG_IWL3945 is not set
+# CONFIG_ATH5K_DEBUG is not set
+# CONFIG_ATH9K is not set
+# CONFIG_AR9170_USB is not set
+CONFIG_B43=m
+CONFIG_B43_PCI_AUTOSELECT=y
+CONFIG_B43_PCICORE_AUTOSELECT=y
+CONFIG_B43_PIO=y
+CONFIG_B43_PHY_LP=y
+CONFIG_B43_LEDS=y
+# CONFIG_B43_DEBUG is not set
+CONFIG_B43LEGACY=m
+CONFIG_B43LEGACY_PCI_AUTOSELECT=y
+CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
+CONFIG_B43LEGACY_LEDS=y
+CONFIG_B43LEGACY_DEBUG=y
+CONFIG_B43LEGACY_DMA=y
+CONFIG_B43LEGACY_PIO=y
+CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
+# CONFIG_B43LEGACY_DMA_MODE is not set
+# CONFIG_B43LEGACY_PIO_MODE is not set
 # CONFIG_HOSTAP is not set
-# CONFIG_BCM43XX is not set
-# CONFIG_B43 is not set
-# CONFIG_B43LEGACY is not set
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_HERMES is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_WL12XX is not set
 CONFIG_ZD1211RW=m
 # CONFIG_ZD1211RW_DEBUG is not set
-# CONFIG_RT2X00 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
 
 #
 # USB Network Adapters
@@ -1072,7 +1166,10 @@
 CONFIG_USB_USBNET=m
 CONFIG_USB_NET_AX8817X=m
 CONFIG_USB_NET_CDCETHER=m
+# CONFIG_USB_NET_CDC_EEM is not set
 CONFIG_USB_NET_DM9601=m
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
 CONFIG_USB_NET_GL620A=m
 CONFIG_USB_NET_NET1080=m
 CONFIG_USB_NET_PLUSB=m
@@ -1086,6 +1183,10 @@
 CONFIG_USB_EPSON2888=y
 CONFIG_USB_KC2190=y
 CONFIG_USB_NET_ZAURUS=m
+# CONFIG_USB_HSO is not set
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+CONFIG_USB_SIERRA_NET=m
 # CONFIG_WAN is not set
 CONFIG_ATM_DRIVERS=y
 CONFIG_ATM_DUMMY=m
@@ -1099,8 +1200,9 @@
 # CONFIG_ATM_AMBASSADOR is not set
 # CONFIG_ATM_HORIZON is not set
 # CONFIG_ATM_IA is not set
-# CONFIG_ATM_FORE200E_MAYBE is not set
+# CONFIG_ATM_FORE200E is not set
 # CONFIG_ATM_HE is not set
+# CONFIG_ATM_SOLOS is not set
 # CONFIG_FDDI is not set
 # CONFIG_HIPPI is not set
 CONFIG_PPP=m
@@ -1120,11 +1222,10 @@
 # CONFIG_SLIP_SMART is not set
 # CONFIG_SLIP_MODE_SLIP6 is not set
 # CONFIG_NET_FC is not set
-CONFIG_NETCONSOLE=y
-# CONFIG_NETCONSOLE_DYNAMIC is not set
-CONFIG_NETPOLL=y
-# CONFIG_NETPOLL_TRAP is not set
-CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_VMXNET3 is not set
 # CONFIG_ISDN is not set
 # CONFIG_PHONE is not set
 
@@ -1134,6 +1235,7 @@
 CONFIG_INPUT=y
 # CONFIG_INPUT_FF_MEMLESS is not set
 # CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
 
 #
 # Userland interfaces
@@ -1166,6 +1268,7 @@
 # Character devices
 #
 # CONFIG_VT is not set
+CONFIG_DEVKMEM=y
 # CONFIG_SERIAL_NONSTANDARD is not set
 # CONFIG_NOZOMI is not set
 
@@ -1185,23 +1288,24 @@
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 # CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
 CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_IPMI_HANDLER is not set
 # CONFIG_HW_RANDOM is not set
-# CONFIG_RTC is not set
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
 # CONFIG_RAW_DRIVER is not set
 # CONFIG_TCG_TPM is not set
 CONFIG_DEVPORT=y
 # CONFIG_I2C is not set
+# CONFIG_SPI is not set
 
 #
-# SPI support
+# PPS support
 #
-# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
+# CONFIG_PPS is not set
 CONFIG_W1=m
 CONFIG_W1_CON=y
 
@@ -1217,21 +1321,45 @@
 #
 CONFIG_W1_SLAVE_THERM=m
 CONFIG_W1_SLAVE_SMEM=m
+# CONFIG_W1_SLAVE_DS2431 is not set
 CONFIG_W1_SLAVE_DS2433=m
 # CONFIG_W1_SLAVE_DS2433_CRC is not set
 CONFIG_W1_SLAVE_DS2760=m
+# CONFIG_W1_SLAVE_BQ27000 is not set
 # CONFIG_POWER_SUPPLY is not set
 # CONFIG_HWMON is not set
 CONFIG_THERMAL=y
-# CONFIG_WATCHDOG is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ALIM7101_WDT is not set
+CONFIG_BCM47XX_WDT=y
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
 
 #
 # Sonics Silicon Backplane
 #
-CONFIG_SSB_POSSIBLE=y
 CONFIG_SSB=y
+CONFIG_SSB_SPROM=y
+CONFIG_SSB_BLOCKIO=y
 CONFIG_SSB_PCIHOST_POSSIBLE=y
 CONFIG_SSB_PCIHOST=y
+CONFIG_SSB_B43_PCI_BRIDGE=y
 # CONFIG_SSB_SILENT is not set
 # CONFIG_SSB_DEBUG is not set
 CONFIG_SSB_SERIAL=y
@@ -1239,24 +1367,26 @@
 CONFIG_SSB_DRIVER_PCICORE=y
 CONFIG_SSB_PCICORE_HOSTMODE=y
 CONFIG_SSB_DRIVER_MIPS=y
+CONFIG_SSB_EMBEDDED=y
 CONFIG_SSB_DRIVER_EXTIF=y
+CONFIG_SSB_DRIVER_GIGE=y
 
 #
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-CONFIG_DAB=y
-CONFIG_USB_DABUSB=m
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
 
 #
 # Graphics support
 #
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
 # CONFIG_DRM is not set
 # CONFIG_VGASTATE is not set
 # CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1271,15 +1401,9 @@
 #
 # Display hardware drivers
 #
-
-#
-# Sound
-#
 CONFIG_SOUND=m
-
-#
-# Advanced Linux Sound Architecture
-#
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
 CONFIG_SND=m
 CONFIG_SND_TIMER=m
 CONFIG_SND_PCM=m
@@ -1292,24 +1416,24 @@
 CONFIG_SND_PCM_OSS=m
 CONFIG_SND_PCM_OSS_PLUGINS=y
 CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_HRTIMER is not set
 # CONFIG_SND_DYNAMIC_MINORS is not set
 CONFIG_SND_SUPPORT_OLD_API=y
 CONFIG_SND_VERBOSE_PROCFS=y
 # CONFIG_SND_VERBOSE_PRINTK is not set
 # CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_RAWMIDI_SEQ=m
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
 CONFIG_SND_DUMMY=m
 CONFIG_SND_VIRMIDI=m
 # CONFIG_SND_MTPAV is not set
 # CONFIG_SND_SERIAL_U16550 is not set
 # CONFIG_SND_MPU401 is not set
-
-#
-# PCI devices
-#
+CONFIG_SND_PCI=y
 # CONFIG_SND_AD1889 is not set
 # CONFIG_SND_ALS300 is not set
 # CONFIG_SND_ALI5451 is not set
@@ -1318,6 +1442,7 @@
 # CONFIG_SND_AU8810 is not set
 # CONFIG_SND_AU8820 is not set
 # CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AW2 is not set
 # CONFIG_SND_AZT3328 is not set
 # CONFIG_SND_BT87X is not set
 # CONFIG_SND_CA0106 is not set
@@ -1325,6 +1450,8 @@
 # CONFIG_SND_OXYGEN is not set
 # CONFIG_SND_CS4281 is not set
 # CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CS5535AUDIO is not set
+# CONFIG_SND_CTXFI is not set
 # CONFIG_SND_DARLA20 is not set
 # CONFIG_SND_GINA20 is not set
 # CONFIG_SND_LAYLA20 is not set
@@ -1337,6 +1464,8 @@
 # CONFIG_SND_INDIGO is not set
 # CONFIG_SND_INDIGOIO is not set
 # CONFIG_SND_INDIGODJ is not set
+# CONFIG_SND_INDIGOIOX is not set
+# CONFIG_SND_INDIGODJX is not set
 # CONFIG_SND_EMU10K1 is not set
 # CONFIG_SND_EMU10K1X is not set
 # CONFIG_SND_ENS1370 is not set
@@ -1353,6 +1482,7 @@
 # CONFIG_SND_INTEL8X0 is not set
 # CONFIG_SND_INTEL8X0M is not set
 # CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LX6464ES is not set
 # CONFIG_SND_MAESTRO3 is not set
 # CONFIG_SND_MIXART is not set
 # CONFIG_SND_NM256 is not set
@@ -1368,45 +1498,22 @@
 # CONFIG_SND_VIRTUOSO is not set
 # CONFIG_SND_VX222 is not set
 # CONFIG_SND_YMFPCI is not set
-
-#
-# ALSA MIPS devices
-#
-
-#
-# USB devices
-#
+CONFIG_SND_MIPS=y
+CONFIG_SND_USB=y
 CONFIG_SND_USB_AUDIO=m
+# CONFIG_SND_USB_UA101 is not set
 # CONFIG_SND_USB_CAIAQ is not set
-
-#
-# System on Chip audio support
-#
 # CONFIG_SND_SOC is not set
-
-#
-# SoC Audio support for SuperH
-#
-
-#
-# ALSA SoC audio for Freescale SOCs
-#
-
-#
-# Open Sound System
-#
 # CONFIG_SOUND_PRIME is not set
 CONFIG_HID_SUPPORT=y
 CONFIG_HID=m
-# CONFIG_HID_DEBUG is not set
 # CONFIG_HIDRAW is not set
 
 #
 # USB Input Devices
 #
 CONFIG_USB_HID=m
-# CONFIG_USB_HIDINPUT_POWERBOOK is not set
-# CONFIG_HID_FF is not set
+# CONFIG_HID_PID is not set
 CONFIG_USB_HIDDEV=y
 
 #
@@ -1414,6 +1521,41 @@
 #
 # CONFIG_USB_KBD is not set
 # CONFIG_USB_MOUSE is not set
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_3M_PCT is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_ZEROPLUS is not set
 CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
 CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1429,14 +1571,24 @@
 # CONFIG_USB_DEVICE_CLASS is not set
 # CONFIG_USB_DYNAMIC_MINORS is not set
 # CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
 
 #
 # USB Host Controller Drivers
 #
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_XHCI_HCD is not set
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_EHCI_TT_NEWSCHED=y
+# CONFIG_USB_OXU210HP_HCD is not set
 # CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
 CONFIG_USB_OHCI_HCD=y
 # CONFIG_USB_OHCI_HCD_SSB is not set
 # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
@@ -1446,26 +1598,30 @@
 CONFIG_USB_U132_HCD=m
 # CONFIG_USB_SL811_HCD is not set
 CONFIG_USB_R8A66597_HCD=m
+# CONFIG_USB_WHCI_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
 
 #
 # USB Device Class drivers
 #
 CONFIG_USB_ACM=m
 CONFIG_USB_PRINTER=m
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
 
 #
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
 #
 
 #
-# may also be needed; see USB_STORAGE Help for more information
+# also be needed; see USB_STORAGE Help for more info
 #
 CONFIG_USB_STORAGE=y
 # CONFIG_USB_STORAGE_DEBUG is not set
 CONFIG_USB_STORAGE_DATAFAB=y
 CONFIG_USB_STORAGE_FREECOM=y
 # CONFIG_USB_STORAGE_ISD200 is not set
-CONFIG_USB_STORAGE_DPCM=y
 CONFIG_USB_STORAGE_USBAT=y
 CONFIG_USB_STORAGE_SDDR09=y
 CONFIG_USB_STORAGE_SDDR55=y
@@ -1473,6 +1629,7 @@
 CONFIG_USB_STORAGE_ALAUDA=y
 CONFIG_USB_STORAGE_ONETOUCH=y
 CONFIG_USB_STORAGE_KARMA=y
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
 # CONFIG_USB_LIBUSUAL is not set
 
 #
@@ -1480,7 +1637,6 @@
 #
 CONFIG_USB_MDC800=m
 CONFIG_USB_MICROTEK=m
-# CONFIG_USB_MON is not set
 
 #
 # USB port drivers
@@ -1489,13 +1645,12 @@
 CONFIG_USB_EZUSB=y
 CONFIG_USB_SERIAL_GENERIC=y
 CONFIG_USB_SERIAL_AIRCABLE=m
-CONFIG_USB_SERIAL_AIRPRIME=m
 CONFIG_USB_SERIAL_ARK3116=m
 CONFIG_USB_SERIAL_BELKIN=m
 CONFIG_USB_SERIAL_CH341=m
 # CONFIG_USB_SERIAL_WHITEHEAT is not set
 CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
-CONFIG_USB_SERIAL_CP2101=m
+# CONFIG_USB_SERIAL_CP210X is not set
 CONFIG_USB_SERIAL_CYPRESS_M8=m
 CONFIG_USB_SERIAL_EMPEG=m
 CONFIG_USB_SERIAL_FTDI_SIO=m
@@ -1515,18 +1670,26 @@
 CONFIG_USB_SERIAL_MCT_U232=m
 CONFIG_USB_SERIAL_MOS7720=m
 CONFIG_USB_SERIAL_MOS7840=m
+# CONFIG_USB_SERIAL_MOTOROLA is not set
 CONFIG_USB_SERIAL_NAVMAN=m
 CONFIG_USB_SERIAL_PL2303=m
 CONFIG_USB_SERIAL_OTI6858=m
+# CONFIG_USB_SERIAL_QCAUX is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
 CONFIG_USB_SERIAL_HP4X=m
 CONFIG_USB_SERIAL_SAFE=m
 # CONFIG_USB_SERIAL_SAFE_PADDED is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
 CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+# CONFIG_USB_SERIAL_SYMBOL is not set
 # CONFIG_USB_SERIAL_TI is not set
 CONFIG_USB_SERIAL_CYBERJACK=m
 CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OPTION=m
 CONFIG_USB_SERIAL_OMNINET=m
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
 CONFIG_USB_SERIAL_DEBUG=m
 
 #
@@ -1535,18 +1698,13 @@
 # CONFIG_USB_EMI62 is not set
 # CONFIG_USB_EMI26 is not set
 CONFIG_USB_ADUTUX=m
-CONFIG_USB_AUERSWALD=m
+# CONFIG_USB_SEVSEG is not set
 CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
-CONFIG_USB_BERRY_CHARGE=m
 CONFIG_USB_LED=m
 CONFIG_USB_CYPRESS_CY7C63=m
 CONFIG_USB_CYTHERM=m
-CONFIG_USB_PHIDGET=m
-CONFIG_USB_PHIDGETKIT=m
-CONFIG_USB_PHIDGETMOTORCONTROL=m
-CONFIG_USB_PHIDGETSERVO=m
 CONFIG_USB_IDMOUSE=m
 CONFIG_USB_FTDI_ELAN=m
 # CONFIG_USB_APPLEDISPLAY is not set
@@ -1555,6 +1713,7 @@
 CONFIG_USB_TRANCEVIBRATOR=m
 CONFIG_USB_IOWARRIOR=m
 CONFIG_USB_TEST=m
+# CONFIG_USB_ISIGHTFW is not set
 CONFIG_USB_ATM=m
 CONFIG_USB_SPEEDTOUCH=m
 CONFIG_USB_CXACRU=m
@@ -1563,30 +1722,51 @@
 CONFIG_USB_GADGET=m
 # CONFIG_USB_GADGET_DEBUG_FILES is not set
 # CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
 CONFIG_USB_GADGET_SELECTED=y
-# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_AT91 is not set
 # CONFIG_USB_GADGET_ATMEL_USBA is not set
 # CONFIG_USB_GADGET_FSL_USB2 is not set
-CONFIG_USB_GADGET_NET2280=y
-CONFIG_USB_NET2280=m
-# CONFIG_USB_GADGET_PXA2XX is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_GOKU is not set
 # CONFIG_USB_GADGET_LH7A40X is not set
 # CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
 # CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+CONFIG_USB_GADGET_NET2280=y
+CONFIG_USB_NET2280=m
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
 # CONFIG_USB_GADGET_DUMMY_HCD is not set
 CONFIG_USB_GADGET_DUALSPEED=y
 CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
 CONFIG_USB_ETH=m
 CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
 CONFIG_USB_GADGETFS=m
 CONFIG_USB_FILE_STORAGE=m
 # CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_MASS_STORAGE is not set
 CONFIG_USB_G_SERIAL=m
 CONFIG_USB_MIDI_GADGET=m
 # CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_MULTI is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_UWB is not set
 # CONFIG_MMC is not set
 # CONFIG_MEMSTICK is not set
 CONFIG_NEW_LEDS=y
@@ -1596,21 +1776,33 @@
 # LED drivers
 #
 CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
 
 #
 # LED Triggers
 #
-CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
 # CONFIG_INFINIBAND is not set
 CONFIG_RTC_LIB=y
 # CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
 
 #
-# Userspace I/O
+# TI VLYNQ
 #
-# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
 
 #
 # File systems
@@ -1621,10 +1813,11 @@
 CONFIG_EXT2_FS_SECURITY=y
 # CONFIG_EXT2_FS_XIP is not set
 CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 CONFIG_EXT3_FS_XATTR=y
 CONFIG_EXT3_FS_POSIX_ACL=y
 CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 CONFIG_JBD=y
 # CONFIG_JBD_DEBUG is not set
 CONFIG_FS_MBCACHE=y
@@ -1642,28 +1835,39 @@
 CONFIG_FS_POSIX_ACL=y
 CONFIG_XFS_FS=m
 CONFIG_XFS_QUOTA=y
-CONFIG_XFS_SECURITY=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
+# CONFIG_XFS_DEBUG is not set
 CONFIG_GFS2_FS=m
-CONFIG_GFS2_FS_LOCKING_NOLOCK=m
-CONFIG_GFS2_FS_LOCKING_DLM=m
+# CONFIG_GFS2_FS_LOCKING_DLM is not set
 # CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
 CONFIG_DNOTIFY=y
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=m
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
 CONFIG_QUOTACTL=y
 CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
 CONFIG_GENERIC_ACL=y
 
 #
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
 # CD-ROM/DVD Filesystems
 #
 CONFIG_ISO9660_FS=m
@@ -1690,15 +1894,13 @@
 CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 # CONFIG_HUGETLB_PAGE is not set
 CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
 CONFIG_ADFS_FS=m
 # CONFIG_ADFS_FS_RW is not set
 CONFIG_AFFS_FS=m
@@ -1721,12 +1923,19 @@
 # CONFIG_JFFS2_LZO is not set
 CONFIG_JFFS2_RTIME=y
 # CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
 CONFIG_CRAMFS=m
+# CONFIG_SQUASHFS is not set
 CONFIG_VXFS_FS=m
 CONFIG_MINIX_FS=m
+# CONFIG_OMFS_FS is not set
 CONFIG_HPFS_FS=m
 CONFIG_QNX4FS_FS=m
 CONFIG_ROMFS_FS=m
+CONFIG_ROMFS_BACKED_BY_BLOCK=y
+# CONFIG_ROMFS_BACKED_BY_MTD is not set
+# CONFIG_ROMFS_BACKED_BY_BOTH is not set
+CONFIG_ROMFS_ON_BLOCK=y
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 # CONFIG_UFS_FS_WRITE is not set
@@ -1736,13 +1945,12 @@
 CONFIG_NFS_V3=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=y
-# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFS_V4_1 is not set
 CONFIG_NFSD=m
 CONFIG_NFSD_V2_ACL=y
 CONFIG_NFSD_V3=y
 CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
-CONFIG_NFSD_TCP=y
 CONFIG_LOCKD=m
 CONFIG_LOCKD_V4=y
 CONFIG_EXPORTFS=m
@@ -1750,10 +1958,10 @@
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=m
 CONFIG_SUNRPC_GSS=m
-CONFIG_SUNRPC_BIND34=y
 CONFIG_RPCSEC_GSS_KRB5=m
 CONFIG_RPCSEC_GSS_SPKM3=m
 # CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
 CONFIG_CIFS=m
 # CONFIG_CIFS_STATS is not set
 # CONFIG_CIFS_WEAK_PW_HASH is not set
@@ -1771,9 +1979,7 @@
 CONFIG_NCPFS_NLS=y
 CONFIG_NCPFS_EXTRAS=y
 CONFIG_CODA_FS=m
-# CONFIG_CODA_FS_OLD_API is not set
 # CONFIG_AFS_FS is not set
-CONFIG_9P_FS=m
 
 #
 # Partition Types
@@ -1846,90 +2052,167 @@
 # CONFIG_PRINTK_TIME is not set
 CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
 # CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_LKDTM is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
 # CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_EARLY_PRINTK=y
 # CONFIG_CMDLINE_BOOL is not set
+# CONFIG_SPINLOCK_TEST is not set
 
 #
 # Security options
 #
 # CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
 CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
 CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
 CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
 CONFIG_CRYPTO_BLKCIPHER=m
-# CONFIG_CRYPTO_SEQIV is not set
+CONFIG_CRYPTO_BLKCIPHER2=y
 CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
 CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=m
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+
+#
+# Hash modes
+#
 CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_NULL=m
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_GHASH is not set
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
 CONFIG_CRYPTO_SHA1=m
 CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_GF128MUL=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_XTS=m
-# CONFIG_CRYPTO_CTR is not set
-# CONFIG_CRYPTO_GCM is not set
-# CONFIG_CRYPTO_CCM is not set
-# CONFIG_CRYPTO_CRYPTD is not set
-CONFIG_CRYPTO_DES=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
 CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_SEED=m
 # CONFIG_CRYPTO_SALSA20 is not set
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
 CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_AUTHENC=m
+# CONFIG_CRYPTO_ZLIB is not set
 # CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_HW=y
 # CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_BINARY_PRINTF is not set
 
 #
 # Library routines
 #
 CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
 CONFIG_CRC_CCITT=m
 CONFIG_CRC16=m
+# CONFIG_CRC_T10DIF is not set
 CONFIG_CRC_ITU_T=m
 CONFIG_CRC32=y
 CONFIG_CRC7=m
 CONFIG_LIBCRC32C=m
 CONFIG_AUDIT_GENERIC=y
-CONFIG_ZLIB_INFLATE=m
+CONFIG_ZLIB_INFLATE=y
 CONFIG_ZLIB_DEFLATE=m
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_LZMA=y
 CONFIG_TEXTSEARCH=y
 CONFIG_TEXTSEARCH_KMP=m
 CONFIG_TEXTSEARCH_BM=m
 CONFIG_TEXTSEARCH_FSM=m
-CONFIG_PLIST=y
 CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
 CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 144b94d..cff8f4c 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.23-rc8
-# Sun Sep 30 12:56:10 2007
+# Linux kernel version: 2.6.34-rc6
+# Sat May  1 13:39:10 2010
 #
 CONFIG_MIPS=y
 
@@ -9,20 +9,28 @@
 # Machine selection
 #
 CONFIG_MACH_ALCHEMY=y
+# CONFIG_AR7 is not set
+# CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
 # CONFIG_MIPS_COBALT is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
-# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_LASAT is not set
+# CONFIG_MACH_LOONGSON is not set
 # CONFIG_MIPS_MALTA is not set
 # CONFIG_MIPS_SIM is not set
-# CONFIG_MARKEINS is not set
+# CONFIG_NEC_MARKEINS is not set
 # CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
 # CONFIG_PNX8550_JBS is not set
 # CONFIG_PNX8550_STB810 is not set
 # CONFIG_PMC_MSP is not set
 # CONFIG_PMC_YOSEMITE is not set
+# CONFIG_POWERTV is not set
 # CONFIG_SGI_IP22 is not set
 # CONFIG_SGI_IP27 is not set
+# CONFIG_SGI_IP28 is not set
 # CONFIG_SGI_IP32 is not set
 # CONFIG_SIBYTE_CRHINE is not set
 # CONFIG_SIBYTE_CARMEL is not set
@@ -33,10 +41,14 @@
 # CONFIG_SIBYTE_SENTOSA is not set
 # CONFIG_SIBYTE_BIGSUR is not set
 # CONFIG_SNI_RM is not set
-# CONFIG_TOSHIBA_JMR3927 is not set
-# CONFIG_TOSHIBA_RBTX4927 is not set
-# CONFIG_TOSHIBA_RBTX4938 is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
 # CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+CONFIG_ALCHEMY_GPIOINT_AU1000=y
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
 CONFIG_MIPS_MTX1=y
 # CONFIG_MIPS_BOSPORUS is not set
 # CONFIG_MIPS_DB1000 is not set
@@ -53,29 +65,38 @@
 # CONFIG_MIPS_XXS1500 is not set
 CONFIG_SOC_AU1500=y
 CONFIG_SOC_AU1X00=y
+CONFIG_LOONGSON_UART_BASE=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_SUPPORTS_OPROFILE=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_GENERIC_TIME=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
-# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_CEVT_R4K_LIB=y
+CONFIG_CSRC_R4K_LIB=y
 CONFIG_DMA_NONCOHERENT=y
-CONFIG_DMA_NEED_PCI_MAP_STATE=y
-# CONFIG_HOTPLUG_CPU is not set
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_SYS_HAS_EARLY_PRINTK=y
 # CONFIG_NO_IOPORT is not set
+CONFIG_GENERIC_GPIO=y
 # CONFIG_CPU_BIG_ENDIAN is not set
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_SYS_SUPPORTS_APM_EMULATION=y
 CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y
+CONFIG_IRQ_CPU=y
 CONFIG_MIPS_L1_CACHE_SHIFT=5
 
 #
 # CPU selection
 #
-# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_LOONGSON2E is not set
+# CONFIG_CPU_LOONGSON2F is not set
 CONFIG_CPU_MIPS32_R1=y
 # CONFIG_CPU_MIPS32_R2 is not set
 # CONFIG_CPU_MIPS64_R1 is not set
@@ -88,6 +109,7 @@
 # CONFIG_CPU_TX49XX is not set
 # CONFIG_CPU_R5000 is not set
 # CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
 # CONFIG_CPU_R6000 is not set
 # CONFIG_CPU_NEVADA is not set
 # CONFIG_CPU_R8000 is not set
@@ -95,11 +117,14 @@
 # CONFIG_CPU_RM7000 is not set
 # CONFIG_CPU_RM9000 is not set
 # CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
+CONFIG_SYS_SUPPORTS_ZBOOT=y
 CONFIG_SYS_HAS_CPU_MIPS32_R1=y
 CONFIG_CPU_MIPS32=y
 CONFIG_CPU_MIPSR1=y
 CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
 CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_HARDWARE_WATCHPOINTS=y
 
 #
 # Kernel type
@@ -109,28 +134,36 @@
 CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_8KB is not set
 # CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
 # CONFIG_PAGE_SIZE_64KB is not set
 CONFIG_CPU_HAS_PREFETCH=y
 CONFIG_MIPS_MT_DISABLED=y
 # CONFIG_MIPS_MT_SMP is not set
 # CONFIG_MIPS_MT_SMTC is not set
 CONFIG_64BIT_PHYS_ADDR=y
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
 CONFIG_CPU_HAS_SYNC=y
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_GENERIC_IRQ_PROBE=y
 CONFIG_CPU_SUPPORTS_HIGHMEM=y
 CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
-CONFIG_RESOURCES_64BIT=y
+CONFIG_PHYS_ADDR_T_64BIT=y
 CONFIG_ZONE_DMA_FLAG=0
 CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
 # CONFIG_HZ_48 is not set
 # CONFIG_HZ_100 is not set
 # CONFIG_HZ_128 is not set
@@ -148,6 +181,7 @@
 CONFIG_LOCKDEP_SUPPORT=y
 CONFIG_STACKTRACE_SUPPORT=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
 
 #
 # General setup
@@ -157,23 +191,49 @@
 CONFIG_INIT_ENV_ARG_LIMIT=32
 CONFIG_LOCALVERSION=""
 # CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
 CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
 CONFIG_SYSVIPC_SYSCTL=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 # CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
 CONFIG_AUDIT=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=17
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 CONFIG_RELAY=y
+# CONFIG_NAMESPACES is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
 CONFIG_EMBEDDED=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
@@ -182,61 +242,104 @@
 CONFIG_PRINTK=y
 CONFIG_BUG=y
 CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
 CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
 CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_COMPAT_BRK=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_HAVE_OPROFILE=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_SLOW_WORK_DEBUG is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_KMOD=y
 CONFIG_BLOCK=y
-CONFIG_LBD=y
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+CONFIG_LBDAF=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
 #
 CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
 CONFIG_IOSCHED_DEADLINE=y
 CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
 # CONFIG_DEFAULT_DEADLINE is not set
 CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
 
 #
 # Bus options (PCI, PCMCIA, EISA, ISA, TC)
 #
 CONFIG_HW_HAS_PCI=y
 CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
 # CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
 CONFIG_MMU=y
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
 CONFIG_PCCARD=m
-# CONFIG_PCMCIA_DEBUG is not set
 CONFIG_PCMCIA=m
 CONFIG_PCMCIA_LOAD_CIS=y
-CONFIG_PCMCIA_IOCTL=y
 CONFIG_CARDBUS=y
 
 #
@@ -251,6 +354,7 @@
 CONFIG_PD6729=m
 CONFIG_I82092=m
 # CONFIG_PCMCIA_AU1X00 is not set
+# CONFIG_PCMCIA_ALCHEMY_DEVBOARD is not set
 CONFIG_PCCARD_NONSTATIC=m
 # CONFIG_HOTPLUG_PCI is not set
 
@@ -258,35 +362,38 @@
 # Executable file formats
 #
 CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_TRAD_SIGNALS=y
 
 #
 # Power management options
 #
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
 CONFIG_PM=y
-# CONFIG_PM_LEGACY is not set
 # CONFIG_PM_DEBUG is not set
 CONFIG_PM_SLEEP=y
-CONFIG_SUSPEND_UP_POSSIBLE=y
 CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_HIBERNATION is not set
 # CONFIG_APM_EMULATION is not set
-
-#
-# Networking
-#
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
 CONFIG_NET=y
 
 #
 # Networking options
 #
 CONFIG_PACKET=m
-CONFIG_PACKET_MMAP=y
 CONFIG_UNIX=y
 CONFIG_XFRM=y
 CONFIG_XFRM_USER=m
 # CONFIG_XFRM_SUB_POLICY is not set
 # CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=m
 CONFIG_NET_KEY=m
 # CONFIG_NET_KEY_MIGRATE is not set
 CONFIG_INET=y
@@ -315,13 +422,89 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_LRO=y
 CONFIG_INET_DIAG=y
 CONFIG_INET_TCP_DIAG=y
 # CONFIG_TCP_CONG_ADVANCED is not set
 CONFIG_TCP_CONG_CUBIC=y
 CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+# CONFIG_IPV6_MIP6 is not set
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=m
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_QUEUE=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+# CONFIG_NF_CONNTRACK is not set
+# CONFIG_NETFILTER_TPROXY is not set
+CONFIG_NETFILTER_XTABLES=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+CONFIG_NETFILTER_XT_MATCH_HL=m
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
 CONFIG_IP_VS=m
+# CONFIG_IP_VS_IPV6 is not set
 # CONFIG_IP_VS_DEBUG is not set
 CONFIG_IP_VS_TAB_BITS=12
 
@@ -330,8 +513,10 @@
 #
 CONFIG_IP_VS_PROTO_TCP=y
 CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
 CONFIG_IP_VS_PROTO_ESP=y
 CONFIG_IP_VS_PROTO_AH=y
+# CONFIG_IP_VS_PROTO_SCTP is not set
 
 #
 # IPVS scheduler
@@ -351,86 +536,22 @@
 # IPVS application helper
 #
 CONFIG_IP_VS_FTP=m
-CONFIG_IPV6=m
-CONFIG_IPV6_PRIVACY=y
-# CONFIG_IPV6_ROUTER_PREF is not set
-# CONFIG_IPV6_OPTIMISTIC_DAD is not set
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-# CONFIG_IPV6_MIP6 is not set
-CONFIG_INET6_XFRM_TUNNEL=m
-CONFIG_INET6_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_SIT=m
-CONFIG_IPV6_TUNNEL=m
-# CONFIG_IPV6_MULTIPLE_TABLES is not set
-# CONFIG_NETLABEL is not set
-CONFIG_NETWORK_SECMARK=y
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-CONFIG_BRIDGE_NETFILTER=y
-
-#
-# Core Netfilter Configuration
-#
-CONFIG_NETFILTER_NETLINK=m
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NETFILTER_NETLINK_LOG=m
-# CONFIG_NF_CONNTRACK_ENABLED is not set
-# CONFIG_NF_CONNTRACK is not set
-CONFIG_NETFILTER_XTABLES=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_DSCP=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
-# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
-CONFIG_NETFILTER_XT_TARGET_SECMARK=m
-# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
-CONFIG_NETFILTER_XT_MATCH_DSCP=m
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_QUOTA=m
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_SCTP=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-# CONFIG_NETFILTER_XT_MATCH_U32 is not set
-# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
 
 #
 # IP: Netfilter Configuration
 #
+# CONFIG_NF_DEFRAG_IPV4 is not set
 CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_IPRANGE=m
-CONFIG_IP_NF_MATCH_TOS=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_AH=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_OWNER=m
 CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
 CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_TOS=m
 CONFIG_IP_NF_TARGET_ECN=m
 CONFIG_IP_NF_TARGET_TTL=m
 CONFIG_IP_NF_RAW=m
@@ -439,34 +560,29 @@
 CONFIG_IP_NF_ARP_MANGLE=m
 
 #
-# IPv6: Netfilter Configuration (EXPERIMENTAL)
+# IPv6: Netfilter Configuration
 #
 CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_OWNER=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_AH=m
-# CONFIG_IP6_NF_MATCH_MH is not set
 CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+# CONFIG_IP6_NF_MATCH_MH is not set
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
 CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_TARGET_HL=m
 CONFIG_IP6_NF_RAW=m
 
 #
 # DECnet: Netfilter Configuration
 #
 CONFIG_DECNET_NF_GRABULATOR=m
-
-#
-# Bridge: Netfilter Configuration
-#
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -475,6 +591,7 @@
 CONFIG_BRIDGE_EBT_AMONG=m
 CONFIG_BRIDGE_EBT_ARP=m
 CONFIG_BRIDGE_EBT_IP=m
+# CONFIG_BRIDGE_EBT_IP6 is not set
 CONFIG_BRIDGE_EBT_LIMIT=m
 CONFIG_BRIDGE_EBT_MARK=m
 CONFIG_BRIDGE_EBT_PKTTYPE=m
@@ -487,25 +604,25 @@
 CONFIG_BRIDGE_EBT_SNAT=m
 CONFIG_BRIDGE_EBT_LOG=m
 CONFIG_BRIDGE_EBT_ULOG=m
+# CONFIG_BRIDGE_EBT_NFLOG is not set
 CONFIG_IP_DCCP=m
 CONFIG_INET_DCCP_DIAG=m
-CONFIG_IP_DCCP_ACKVEC=y
 
 #
 # DCCP CCIDs Configuration (EXPERIMENTAL)
 #
-CONFIG_IP_DCCP_CCID2=m
 # CONFIG_IP_DCCP_CCID2_DEBUG is not set
-CONFIG_IP_DCCP_CCID3=m
-CONFIG_IP_DCCP_TFRC_LIB=m
+CONFIG_IP_DCCP_CCID3=y
 # CONFIG_IP_DCCP_CCID3_DEBUG is not set
 CONFIG_IP_DCCP_CCID3_RTO=100
+CONFIG_IP_DCCP_TFRC_LIB=y
 CONFIG_IP_SCTP=m
 # CONFIG_SCTP_DBG_MSG is not set
 # CONFIG_SCTP_DBG_OBJCNT is not set
 # CONFIG_SCTP_HMAC_NONE is not set
 # CONFIG_SCTP_HMAC_SHA1 is not set
 CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_RDS is not set
 CONFIG_TIPC=m
 # CONFIG_TIPC_ADVANCED is not set
 # CONFIG_TIPC_DEBUG is not set
@@ -516,8 +633,12 @@
 CONFIG_ATM_MPOA=m
 CONFIG_ATM_BR2684=m
 # CONFIG_ATM_BR2684_IPFILTER is not set
+CONFIG_STP=m
 CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+# CONFIG_NET_DSA is not set
 CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
 CONFIG_DECNET=m
 # CONFIG_DECNET_ROUTER is not set
 CONFIG_LLC=y
@@ -535,12 +656,9 @@
 CONFIG_ECONET_AUNUDP=y
 CONFIG_ECONET_NATIVE=y
 CONFIG_WAN_ROUTER=m
-
-#
-# QoS and/or fair queueing
-#
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
 CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_FIFO=y
 
 #
 # Queueing/Scheduling
@@ -550,7 +668,7 @@
 CONFIG_NET_SCH_HFSC=m
 CONFIG_NET_SCH_ATM=m
 CONFIG_NET_SCH_PRIO=m
-# CONFIG_NET_SCH_RR is not set
+# CONFIG_NET_SCH_MULTIQ is not set
 CONFIG_NET_SCH_RED=m
 CONFIG_NET_SCH_SFQ=m
 CONFIG_NET_SCH_TEQL=m
@@ -558,6 +676,7 @@
 CONFIG_NET_SCH_GRED=m
 CONFIG_NET_SCH_DSMARK=m
 CONFIG_NET_SCH_NETEM=m
+# CONFIG_NET_SCH_DRR is not set
 CONFIG_NET_SCH_INGRESS=m
 
 #
@@ -574,6 +693,7 @@
 CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_RSVP=m
 CONFIG_NET_CLS_RSVP6=m
+# CONFIG_NET_CLS_FLOW is not set
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_STACK=32
 CONFIG_NET_EMATCH_CMP=m
@@ -586,10 +706,13 @@
 # CONFIG_NET_ACT_GACT is not set
 # CONFIG_NET_ACT_MIRRED is not set
 # CONFIG_NET_ACT_IPT is not set
+# CONFIG_NET_ACT_NAT is not set
 # CONFIG_NET_ACT_PEDIT is not set
 # CONFIG_NET_ACT_SIMP is not set
-CONFIG_NET_CLS_POLICE=y
+# CONFIG_NET_ACT_SKBEDIT is not set
 # CONFIG_NET_CLS_IND is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
 
 #
 # Network testing
@@ -613,9 +736,8 @@
 CONFIG_BPQETHER=m
 CONFIG_BAYCOM_SER_FDX=m
 CONFIG_BAYCOM_SER_HDX=m
-CONFIG_BAYCOM_PAR=m
-CONFIG_BAYCOM_EPP=m
 CONFIG_YAM=m
+# CONFIG_CAN is not set
 CONFIG_IRDA=m
 
 #
@@ -657,15 +779,8 @@
 CONFIG_OLD_BELKIN_DONGLE=m
 CONFIG_ACT200L_DONGLE=m
 # CONFIG_KINGSUN_DONGLE is not set
-
-#
-# Old SIR device drivers
-#
-# CONFIG_IRPORT_SIR is not set
-
-#
-# Old Serial dongle support
-#
+# CONFIG_KSDAZZLE_DONGLE is not set
+# CONFIG_KS959_DONGLE is not set
 
 #
 # FIR device drivers
@@ -683,17 +798,17 @@
 CONFIG_BT_BNEP=m
 CONFIG_BT_BNEP_MC_FILTER=y
 CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_CMTP=m
 CONFIG_BT_HIDP=m
 
 #
 # Bluetooth device drivers
 #
-CONFIG_BT_HCIUSB=m
-CONFIG_BT_HCIUSB_SCO=y
+# CONFIG_BT_HCIBTUSB is not set
+# CONFIG_BT_HCIBTSDIO is not set
 CONFIG_BT_HCIUART=m
 CONFIG_BT_HCIUART_H4=y
 CONFIG_BT_HCIUART_BCSP=y
+# CONFIG_BT_HCIUART_LL is not set
 CONFIG_BT_HCIBCM203X=m
 CONFIG_BT_HCIBPA10X=m
 CONFIG_BT_HCIBFUSB=m
@@ -702,24 +817,19 @@
 CONFIG_BT_HCIBLUECARD=m
 CONFIG_BT_HCIBTUART=m
 CONFIG_BT_HCIVHCI=m
+# CONFIG_BT_MRVL is not set
 CONFIG_AF_RXRPC=m
 # CONFIG_AF_RXRPC_DEBUG is not set
 # CONFIG_RXKAD is not set
 CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
 
 #
-# Wireless
+# CFG80211 needs to be enabled for MAC80211
 #
-# CONFIG_CFG80211 is not set
-CONFIG_WIRELESS_EXT=y
-# CONFIG_MAC80211 is not set
-CONFIG_IEEE80211=m
-# CONFIG_IEEE80211_DEBUG is not set
-CONFIG_IEEE80211_CRYPT_WEP=m
-CONFIG_IEEE80211_CRYPT_CCMP=m
-CONFIG_IEEE80211_CRYPT_TKIP=m
-CONFIG_IEEE80211_SOFTMAC=m
-# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set
+# CONFIG_WIMAX is not set
 # CONFIG_RFKILL is not set
 # CONFIG_NET_9P is not set
 
@@ -730,40 +840,43 @@
 #
 # Generic Driver Options
 #
+CONFIG_UEVENT_HELPER_PATH=""
+# CONFIG_DEVTMPFS is not set
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
 # CONFIG_SYS_HYPERVISOR is not set
 CONFIG_CONNECTOR=m
-CONFIG_MTD=m
+CONFIG_MTD=y
 # CONFIG_MTD_DEBUG is not set
-CONFIG_MTD_CONCAT=m
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
 CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_REDBOOT_PARTS=m
-CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
-# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
-# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
 
 #
 # User Modules And Translation Layers
 #
-CONFIG_MTD_CHAR=m
-CONFIG_MTD_BLKDEVS=m
-CONFIG_MTD_BLOCK=m
-CONFIG_MTD_BLOCK_RO=m
-CONFIG_FTL=m
-CONFIG_NFTL=m
-CONFIG_NFTL_RW=y
-CONFIG_INFTL=m
-CONFIG_RFD_FTL=m
-CONFIG_SSFDC=m
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
 
 #
 # RAM/ROM/Flash chip drivers
 #
-CONFIG_MTD_CFI=m
-CONFIG_MTD_JEDECPROBE=m
-CONFIG_MTD_GEN_PROBE=m
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
 # CONFIG_MTD_CFI_ADV_OPTIONS is not set
 CONFIG_MTD_MAP_BANK_WIDTH_1=y
 CONFIG_MTD_MAP_BANK_WIDTH_2=y
@@ -775,206 +888,81 @@
 CONFIG_MTD_CFI_I2=y
 # CONFIG_MTD_CFI_I4 is not set
 # CONFIG_MTD_CFI_I8 is not set
-CONFIG_MTD_CFI_INTELEXT=m
-CONFIG_MTD_CFI_AMDSTD=m
-CONFIG_MTD_CFI_STAA=m
-CONFIG_MTD_CFI_UTIL=m
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
 CONFIG_MTD_RAM=m
-CONFIG_MTD_ROM=m
-CONFIG_MTD_ABSENT=m
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
 
 #
 # Mapping drivers for chip access
 #
 CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHYSMAP=m
-CONFIG_MTD_PHYSMAP_START=0x8000000
-CONFIG_MTD_PHYSMAP_LEN=0x4000000
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
-# CONFIG_MTD_ALCHEMY is not set
-# CONFIG_MTD_MTX1 is not set
-CONFIG_MTD_PCI=m
-CONFIG_MTD_PLATRAM=m
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_PCI is not set
+# CONFIG_MTD_GPIO_ADDR is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
 
 #
 # Self-contained MTD device drivers
 #
-CONFIG_MTD_PMC551=m
-# CONFIG_MTD_PMC551_BUGFIX is not set
-# CONFIG_MTD_PMC551_DEBUG is not set
-CONFIG_MTD_DATAFLASH=m
-CONFIG_MTD_M25P80=m
-CONFIG_MTD_SLRAM=m
-CONFIG_MTD_PHRAM=m
-CONFIG_MTD_MTDRAM=m
-CONFIG_MTDRAM_TOTAL_SIZE=4096
-CONFIG_MTDRAM_ERASE_SIZE=128
-CONFIG_MTD_BLOCK2MTD=m
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
 
 #
 # Disk-On-Chip Device Drivers
 #
-CONFIG_MTD_DOC2000=m
-CONFIG_MTD_DOC2001=m
-CONFIG_MTD_DOC2001PLUS=m
-CONFIG_MTD_DOCPROBE=m
-CONFIG_MTD_DOCECC=m
-# CONFIG_MTD_DOCPROBE_ADVANCED is not set
-CONFIG_MTD_DOCPROBE_ADDRESS=0
-CONFIG_MTD_NAND=m
-# CONFIG_MTD_NAND_VERIFY_WRITE is not set
-# CONFIG_MTD_NAND_ECC_SMC is not set
-# CONFIG_MTD_NAND_MUSEUM_IDS is not set
-CONFIG_MTD_NAND_IDS=m
-CONFIG_MTD_NAND_DISKONCHIP=m
-# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
-CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
-# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
-# CONFIG_MTD_NAND_CAFE is not set
-CONFIG_MTD_NAND_NANDSIM=m
-# CONFIG_MTD_NAND_PLATFORM is not set
-CONFIG_MTD_ONENAND=m
-CONFIG_MTD_ONENAND_VERIFY_WRITE=y
-# CONFIG_MTD_ONENAND_OTP is not set
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
 
 #
 # UBI - Unsorted block images
 #
 # CONFIG_MTD_UBI is not set
-CONFIG_PARPORT=m
-CONFIG_PARPORT_PC=m
-CONFIG_PARPORT_SERIAL=m
-CONFIG_PARPORT_PC_FIFO=y
-CONFIG_PARPORT_PC_SUPERIO=y
-CONFIG_PARPORT_PC_PCMCIA=m
-# CONFIG_PARPORT_GSC is not set
-CONFIG_PARPORT_AX88796=m
-CONFIG_PARPORT_1284=y
-CONFIG_PARPORT_NOT_PC=y
+# CONFIG_PARPORT is not set
 CONFIG_BLK_DEV=y
-CONFIG_PARIDE=m
-
-#
-# Parallel IDE high-level drivers
-#
-CONFIG_PARIDE_PD=m
-CONFIG_PARIDE_PCD=m
-CONFIG_PARIDE_PF=m
-CONFIG_PARIDE_PT=m
-CONFIG_PARIDE_PG=m
-
-#
-# Parallel IDE protocol modules
-#
-CONFIG_PARIDE_ATEN=m
-CONFIG_PARIDE_BPCK=m
-CONFIG_PARIDE_BPCK6=m
-CONFIG_PARIDE_COMM=m
-CONFIG_PARIDE_DSTR=m
-CONFIG_PARIDE_FIT2=m
-CONFIG_PARIDE_FIT3=m
-CONFIG_PARIDE_EPAT=m
-CONFIG_PARIDE_EPATC8=y
-CONFIG_PARIDE_EPIA=m
-CONFIG_PARIDE_FRIQ=m
-CONFIG_PARIDE_FRPW=m
-CONFIG_PARIDE_KBIC=m
-CONFIG_PARIDE_KTTI=m
-CONFIG_PARIDE_ON20=m
-CONFIG_PARIDE_ON26=m
-CONFIG_BLK_CPQ_DA=m
-CONFIG_BLK_CPQ_CISS_DA=m
-CONFIG_CISS_SCSI_TAPE=y
-CONFIG_BLK_DEV_DAC960=m
-CONFIG_BLK_DEV_UMEM=m
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
 # CONFIG_BLK_DEV_COW_COMMON is not set
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
 CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_SX8=m
+# CONFIG_BLK_DEV_SX8 is not set
 # CONFIG_BLK_DEV_UB is not set
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=65536
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_CDROM_PKTCDVD_BUFFERS=8
-# CONFIG_CDROM_PKTCDVD_WCACHE is not set
-CONFIG_ATA_OVER_ETH=m
-CONFIG_MISC_DEVICES=y
-# CONFIG_PHANTOM is not set
-# CONFIG_EEPROM_93CX6 is not set
-CONFIG_SGI_IOC4=m
-CONFIG_TIFM_CORE=m
-CONFIG_TIFM_7XX1=m
-CONFIG_IDE=y
-CONFIG_IDE_MAX_HWIFS=4
-CONFIG_BLK_DEV_IDE=y
-
-#
-# Please see Documentation/ide.txt for help/info on IDE drives
-#
-# CONFIG_BLK_DEV_IDE_SATA is not set
-CONFIG_BLK_DEV_IDEDISK=m
-# CONFIG_IDEDISK_MULTI_MODE is not set
-CONFIG_BLK_DEV_IDECS=m
-# CONFIG_BLK_DEV_DELKIN is not set
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_BLK_DEV_IDETAPE=m
-CONFIG_BLK_DEV_IDEFLOPPY=m
-CONFIG_BLK_DEV_IDESCSI=m
-# CONFIG_IDE_TASK_IOCTL is not set
-CONFIG_IDE_PROC_FS=y
-
-#
-# IDE chipset support/bugfixes
-#
-CONFIG_IDE_GENERIC=m
-CONFIG_BLK_DEV_IDEPCI=y
-CONFIG_IDEPCI_SHARE_IRQ=y
-CONFIG_IDEPCI_PCIBUS_ORDER=y
-# CONFIG_BLK_DEV_OFFBOARD is not set
-CONFIG_BLK_DEV_GENERIC=m
-CONFIG_BLK_DEV_OPTI621=m
-CONFIG_BLK_DEV_IDEDMA_PCI=y
-# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-# CONFIG_IDEDMA_ONLYDISK is not set
-CONFIG_BLK_DEV_AEC62XX=m
-CONFIG_BLK_DEV_ALI15X3=m
-# CONFIG_WDC_ALI15X3 is not set
-CONFIG_BLK_DEV_AMD74XX=m
-CONFIG_BLK_DEV_CMD64X=m
-CONFIG_BLK_DEV_TRIFLEX=m
-CONFIG_BLK_DEV_CY82C693=m
-# CONFIG_BLK_DEV_CS5520 is not set
-CONFIG_BLK_DEV_CS5530=m
-CONFIG_BLK_DEV_HPT34X=m
-# CONFIG_HPT34X_AUTODMA is not set
-CONFIG_BLK_DEV_HPT366=m
-# CONFIG_BLK_DEV_JMICRON is not set
-CONFIG_BLK_DEV_SC1200=m
-CONFIG_BLK_DEV_PIIX=m
-# CONFIG_BLK_DEV_IT8213 is not set
-CONFIG_BLK_DEV_IT821X=m
-CONFIG_BLK_DEV_NS87415=m
-CONFIG_BLK_DEV_PDC202XX_OLD=m
-CONFIG_PDC202XX_BURST=y
-CONFIG_BLK_DEV_PDC202XX_NEW=m
-CONFIG_BLK_DEV_SVWKS=m
-CONFIG_BLK_DEV_SIIMAGE=m
-# CONFIG_BLK_DEV_SLC90E66 is not set
-CONFIG_BLK_DEV_TRM290=m
-# CONFIG_BLK_DEV_VIA82CXXX is not set
-# CONFIG_BLK_DEV_TC86C001 is not set
-# CONFIG_IDE_ARM is not set
-CONFIG_BLK_DEV_IDEDMA=y
-# CONFIG_IDEDMA_IVB is not set
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
 # CONFIG_BLK_DEV_HD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_TIFM_CORE=m
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
 
 #
 # SCSI device support
 #
-CONFIG_RAID_ATTRS=m
+CONFIG_SCSI_MOD=m
+# CONFIG_RAID_ATTRS is not set
 CONFIG_SCSI=m
 CONFIG_SCSI_DMA=y
 # CONFIG_SCSI_TGT is not set
@@ -985,18 +973,13 @@
 # SCSI support type (disk, tape, CD-ROM)
 #
 CONFIG_BLK_DEV_SD=m
-CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
-CONFIG_BLK_DEV_SR=m
-# CONFIG_BLK_DEV_SR_VENDOR is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
 CONFIG_CHR_DEV_SG=m
-CONFIG_CHR_DEV_SCH=m
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
+# CONFIG_CHR_DEV_SCH is not set
 CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_CONSTANTS is not set
 CONFIG_SCSI_LOGGING=y
 # CONFIG_SCSI_SCAN_ASYNC is not set
 CONFIG_SCSI_WAIT_SCAN=m
@@ -1009,198 +992,39 @@
 CONFIG_SCSI_ISCSI_ATTRS=m
 CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_ATA is not set
+CONFIG_SCSI_SAS_HOST_SMP=y
 # CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_LOWLEVEL=y
-CONFIG_ISCSI_TCP=m
-CONFIG_BLK_DEV_3W_XXXX_RAID=m
-CONFIG_SCSI_3W_9XXX=m
-CONFIG_SCSI_ACARD=m
-CONFIG_SCSI_AACRAID=m
-CONFIG_SCSI_AIC7XXX=m
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
-CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-CONFIG_AIC7XXX_DEBUG_ENABLE=y
-CONFIG_AIC7XXX_DEBUG_MASK=0
-CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
-# CONFIG_SCSI_AIC7XXX_OLD is not set
-CONFIG_SCSI_AIC79XX=m
-CONFIG_AIC79XX_CMDS_PER_DEVICE=32
-CONFIG_AIC79XX_RESET_DELAY_MS=15000
-CONFIG_AIC79XX_DEBUG_ENABLE=y
-CONFIG_AIC79XX_DEBUG_MASK=0
-CONFIG_AIC79XX_REG_PRETTY_PRINT=y
-CONFIG_SCSI_AIC94XX=m
-# CONFIG_AIC94XX_DEBUG is not set
-CONFIG_SCSI_DPT_I2O=m
-CONFIG_SCSI_ARCMSR=m
-CONFIG_MEGARAID_NEWGEN=y
-CONFIG_MEGARAID_MM=m
-CONFIG_MEGARAID_MAILBOX=m
-CONFIG_MEGARAID_LEGACY=m
-CONFIG_MEGARAID_SAS=m
-CONFIG_SCSI_HPTIOP=m
-CONFIG_SCSI_DMX3191D=m
-CONFIG_SCSI_FUTURE_DOMAIN=m
-CONFIG_SCSI_IPS=m
-CONFIG_SCSI_INITIO=m
-# CONFIG_SCSI_INIA100 is not set
-CONFIG_SCSI_PPA=m
-CONFIG_SCSI_IMM=m
-# CONFIG_SCSI_IZIP_EPP16 is not set
-# CONFIG_SCSI_IZIP_SLOW_CTR is not set
-CONFIG_SCSI_STEX=m
-CONFIG_SCSI_SYM53C8XX_2=m
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
-CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-CONFIG_SCSI_SYM53C8XX_MMIO=y
-CONFIG_SCSI_IPR=m
-# CONFIG_SCSI_IPR_TRACE is not set
-# CONFIG_SCSI_IPR_DUMP is not set
-CONFIG_SCSI_QLOGIC_1280=m
-CONFIG_SCSI_QLA_FC=m
-CONFIG_SCSI_QLA_ISCSI=m
-CONFIG_SCSI_LPFC=m
-CONFIG_SCSI_DC395x=m
-CONFIG_SCSI_DC390T=m
-CONFIG_SCSI_NSP32=m
-CONFIG_SCSI_DEBUG=m
-# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
 # CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
-CONFIG_ATA=m
-# CONFIG_ATA_NONSTANDARD is not set
-CONFIG_SATA_AHCI=m
-CONFIG_SATA_SVW=m
-CONFIG_ATA_PIIX=m
-CONFIG_SATA_MV=m
-CONFIG_SATA_NV=m
-CONFIG_PDC_ADMA=m
-CONFIG_SATA_QSTOR=m
-CONFIG_SATA_PROMISE=m
-CONFIG_SATA_SX4=m
-CONFIG_SATA_SIL=m
-CONFIG_SATA_SIL24=m
-CONFIG_SATA_SIS=m
-CONFIG_SATA_ULI=m
-CONFIG_SATA_VIA=m
-CONFIG_SATA_VITESSE=m
-# CONFIG_SATA_INIC162X is not set
-# CONFIG_PATA_ALI is not set
-# CONFIG_PATA_AMD is not set
-# CONFIG_PATA_ARTOP is not set
-# CONFIG_PATA_ATIIXP is not set
-# CONFIG_PATA_CMD640_PCI is not set
-# CONFIG_PATA_CMD64X is not set
-CONFIG_PATA_CS5520=m
-# CONFIG_PATA_CS5530 is not set
-# CONFIG_PATA_CYPRESS is not set
-CONFIG_PATA_EFAR=m
-CONFIG_ATA_GENERIC=m
-# CONFIG_PATA_HPT366 is not set
-# CONFIG_PATA_HPT37X is not set
-# CONFIG_PATA_HPT3X2N is not set
-# CONFIG_PATA_HPT3X3 is not set
-# CONFIG_PATA_IT821X is not set
-# CONFIG_PATA_IT8213 is not set
-CONFIG_PATA_JMICRON=m
-CONFIG_PATA_TRIFLEX=m
-# CONFIG_PATA_MARVELL is not set
-CONFIG_PATA_MPIIX=m
-# CONFIG_PATA_OLDPIIX is not set
-CONFIG_PATA_NETCELL=m
-# CONFIG_PATA_NS87410 is not set
-# CONFIG_PATA_OPTI is not set
-# CONFIG_PATA_OPTIDMA is not set
-CONFIG_PATA_PCMCIA=m
-# CONFIG_PATA_PDC_OLD is not set
-# CONFIG_PATA_RADISYS is not set
-CONFIG_PATA_RZ1000=m
-# CONFIG_PATA_SC1200 is not set
-# CONFIG_PATA_SERVERWORKS is not set
-CONFIG_PATA_PDC2027X=m
-CONFIG_PATA_SIL680=m
-CONFIG_PATA_SIS=m
-CONFIG_PATA_VIA=m
-CONFIG_PATA_WINBOND=m
-# CONFIG_PATA_PLATFORM is not set
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID10=m
-CONFIG_MD_RAID456=m
-# CONFIG_MD_RAID5_RESHAPE is not set
-CONFIG_MD_MULTIPATH=m
-CONFIG_MD_FAULTY=m
-CONFIG_BLK_DEV_DM=m
-# CONFIG_DM_DEBUG is not set
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_ZERO=m
-CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_EMC=m
-# CONFIG_DM_MULTIPATH_RDAC is not set
-# CONFIG_DM_DELAY is not set
-
-#
-# Fusion MPT device support
-#
-CONFIG_FUSION=y
-CONFIG_FUSION_SPI=m
-CONFIG_FUSION_FC=m
-CONFIG_FUSION_SAS=m
-CONFIG_FUSION_MAX_SGE=128
-CONFIG_FUSION_CTL=m
-CONFIG_FUSION_LAN=m
-# CONFIG_FUSION_LOGGING is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_FUSION is not set
 
 #
 # IEEE 1394 (FireWire) support
 #
+
+#
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# The newer stack is recommended.
+#
 # CONFIG_FIREWIRE is not set
-CONFIG_IEEE1394=m
-
-#
-# Subsystem Options
-#
-# CONFIG_IEEE1394_VERBOSEDEBUG is not set
-
-#
-# Controllers
-#
-CONFIG_IEEE1394_PCILYNX=m
-CONFIG_IEEE1394_OHCI1394=m
-
-#
-# Protocols
-#
-CONFIG_IEEE1394_VIDEO1394=m
-CONFIG_IEEE1394_SBP2=m
-# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
-CONFIG_IEEE1394_ETH1394_ROM_ENTRY=y
-CONFIG_IEEE1394_ETH1394=m
-CONFIG_IEEE1394_DV1394=m
-CONFIG_IEEE1394_RAWIO=m
-CONFIG_I2O=m
-CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y
-CONFIG_I2O_EXT_ADAPTEC=y
-CONFIG_I2O_CONFIG=m
-CONFIG_I2O_CONFIG_OLD_IOCTL=y
-CONFIG_I2O_BUS=m
-CONFIG_I2O_BLOCK=m
-CONFIG_I2O_SCSI=m
-CONFIG_I2O_PROC=m
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 # CONFIG_IFB is not set
 CONFIG_DUMMY=m
 CONFIG_BONDING=m
 # CONFIG_MACVLAN is not set
 CONFIG_EQUALIZER=m
 CONFIG_TUN=m
+# CONFIG_VETH is not set
 CONFIG_ARCNET=m
 CONFIG_ARCNET_1201=m
 CONFIG_ARCNET_1051=m
@@ -1225,9 +1049,11 @@
 CONFIG_SMSC_PHY=m
 # CONFIG_BROADCOM_PHY is not set
 # CONFIG_ICPLUS_PHY is not set
-CONFIG_FIXED_PHY=m
-# CONFIG_FIXED_MII_10_FDX is not set
-# CONFIG_FIXED_MII_100_FDX is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
 CONFIG_NET_ETHERNET=y
 CONFIG_MII=m
 # CONFIG_AX88796 is not set
@@ -1240,8 +1066,12 @@
 CONFIG_TYPHOON=m
 # CONFIG_SMC91X is not set
 # CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
 CONFIG_NET_TULIP=y
 CONFIG_DE2104X=m
+CONFIG_DE2104X_DSL=0
 CONFIG_TULIP=m
 # CONFIG_TULIP_MWI is not set
 # CONFIG_TULIP_MMIO is not set
@@ -1251,21 +1081,26 @@
 CONFIG_DM9102=m
 CONFIG_ULI526X=m
 CONFIG_PCMCIA_XIRCOM=m
-# CONFIG_PCMCIA_XIRTULIP is not set
 CONFIG_HP100=m
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NET_PCI=y
 CONFIG_PCNET32=m
-# CONFIG_PCNET32_NAPI is not set
 CONFIG_AMD8111_ETH=m
-# CONFIG_AMD8111E_NAPI is not set
 CONFIG_ADAPTEC_STARFIRE=m
-# CONFIG_ADAPTEC_STARFIRE_NAPI is not set
+# CONFIG_KSZ884X_PCI is not set
 CONFIG_B44=m
+CONFIG_B44_PCI_AUTOSELECT=y
+CONFIG_B44_PCICORE_AUTOSELECT=y
+CONFIG_B44_PCI=y
 CONFIG_FORCEDETH=m
 # CONFIG_FORCEDETH_NAPI is not set
 # CONFIG_TC35815 is not set
-CONFIG_DGRS=m
-CONFIG_EEPRO100=m
 CONFIG_E100=m
 CONFIG_FEALNX=m
 CONFIG_NATSEMI=m
@@ -1276,52 +1111,71 @@
 # CONFIG_8139TOO_TUNE_TWISTER is not set
 CONFIG_8139TOO_8129=y
 # CONFIG_8139_OLD_RX_RESET is not set
+# CONFIG_R6040 is not set
 CONFIG_SIS900=m
 CONFIG_EPIC100=m
+# CONFIG_SMSC9420 is not set
 CONFIG_SUNDANCE=m
 # CONFIG_SUNDANCE_MMIO is not set
 CONFIG_TLAN=m
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
 CONFIG_VIA_RHINE=m
 # CONFIG_VIA_RHINE_MMIO is not set
-# CONFIG_VIA_RHINE_NAPI is not set
 # CONFIG_SC92031 is not set
-CONFIG_NET_POCKET=y
-CONFIG_DE600=m
-CONFIG_DE620=m
+# CONFIG_ATL2 is not set
 CONFIG_NETDEV_1000=y
 CONFIG_ACENIC=m
 # CONFIG_ACENIC_OMIT_TIGON_I is not set
 CONFIG_DL2K=m
 CONFIG_E1000=m
-# CONFIG_E1000_NAPI is not set
-# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
+# CONFIG_E1000E is not set
+# CONFIG_IP1000 is not set
+# CONFIG_IGB is not set
+# CONFIG_IGBVF is not set
 CONFIG_NS83820=m
 CONFIG_HAMACHI=m
 CONFIG_YELLOWFIN=m
 CONFIG_R8169=m
-# CONFIG_R8169_NAPI is not set
 CONFIG_R8169_VLAN=y
 CONFIG_SIS190=m
 CONFIG_SKGE=m
+# CONFIG_SKGE_DEBUG is not set
 CONFIG_SKY2=m
-CONFIG_SK98LIN=m
+# CONFIG_SKY2_DEBUG is not set
 CONFIG_VIA_VELOCITY=m
 CONFIG_TIGON3=m
 CONFIG_BNX2=m
+# CONFIG_CNIC is not set
 CONFIG_QLA3XXX=m
 # CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_ATL1C is not set
+# CONFIG_JME is not set
 CONFIG_NETDEV_10000=y
+CONFIG_MDIO=m
 CONFIG_CHELSIO_T1=m
 # CONFIG_CHELSIO_T1_1G is not set
-CONFIG_CHELSIO_T1_NAPI=y
+CONFIG_CHELSIO_T3_DEPENDS=y
 # CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4_DEPENDS=y
+# CONFIG_CHELSIO_T4 is not set
+# CONFIG_ENIC is not set
+# CONFIG_IXGBE is not set
 CONFIG_IXGB=m
-# CONFIG_IXGB_NAPI is not set
 CONFIG_S2IO=m
-# CONFIG_S2IO_NAPI is not set
+# CONFIG_VXGE is not set
 CONFIG_MYRI10GE=m
 # CONFIG_NETXEN_NIC is not set
+# CONFIG_NIU is not set
+# CONFIG_MLX4_EN is not set
 # CONFIG_MLX4_CORE is not set
+# CONFIG_TEHUTI is not set
+# CONFIG_BNX2X is not set
+# CONFIG_QLCNIC is not set
+# CONFIG_QLGE is not set
+# CONFIG_SFC is not set
+# CONFIG_BE2NET is not set
 CONFIG_TR=y
 CONFIG_IBMOL=m
 CONFIG_IBMLS=m
@@ -1329,12 +1183,18 @@
 CONFIG_TMS380TR=m
 CONFIG_TMSPCI=m
 CONFIG_ABYSS=m
+CONFIG_WLAN=y
+# CONFIG_PCMCIA_RAYCS is not set
+# CONFIG_ATMEL is not set
+# CONFIG_AIRO_CS is not set
+# CONFIG_PCMCIA_WL3501 is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_HOSTAP is not set
 
 #
-# Wireless LAN
+# Enable WiMAX (Networking options) to see the WiMAX drivers
 #
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
 
 #
 # USB Network Adapters
@@ -1343,11 +1203,13 @@
 CONFIG_USB_KAWETH=m
 CONFIG_USB_PEGASUS=m
 CONFIG_USB_RTL8150=m
-CONFIG_USB_USBNET_MII=m
 CONFIG_USB_USBNET=m
 CONFIG_USB_NET_AX8817X=m
 CONFIG_USB_NET_CDCETHER=m
+# CONFIG_USB_NET_CDC_EEM is not set
 # CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
 CONFIG_USB_NET_GL620A=m
 CONFIG_USB_NET_NET1080=m
 CONFIG_USB_NET_PLUSB=m
@@ -1361,6 +1223,9 @@
 CONFIG_USB_EPSON2888=y
 # CONFIG_USB_KC2190 is not set
 CONFIG_USB_NET_ZAURUS=m
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+CONFIG_USB_SIERRA_NET=m
 CONFIG_NET_PCMCIA=y
 CONFIG_PCMCIA_3C589=m
 CONFIG_PCMCIA_3C574=m
@@ -1383,16 +1248,6 @@
 CONFIG_HDLC_X25=m
 CONFIG_PCI200SYN=m
 CONFIG_WANXL=m
-CONFIG_PC300=m
-CONFIG_PC300_MLPPP=y
-
-#
-# Cyclades-PC300 MLPPP support is disabled.
-#
-
-#
-# Refer to the file README.mlppp, provided by PC300 package.
-#
 # CONFIG_PC300TOO is not set
 CONFIG_FARSYNC=m
 CONFIG_DSCC4=m
@@ -1428,15 +1283,13 @@
 # CONFIG_ATM_HORIZON_DEBUG is not set
 CONFIG_ATM_IA=m
 # CONFIG_ATM_IA_DEBUG is not set
-CONFIG_ATM_FORE200E_MAYBE=m
-CONFIG_ATM_FORE200E_PCA=y
-CONFIG_ATM_FORE200E_PCA_DEFAULT_FW=y
+CONFIG_ATM_FORE200E=m
 # CONFIG_ATM_FORE200E_USE_TASKLET is not set
 CONFIG_ATM_FORE200E_TX_RETRY=16
 CONFIG_ATM_FORE200E_DEBUG=0
-CONFIG_ATM_FORE200E=m
 CONFIG_ATM_HE=m
 CONFIG_ATM_HE_USE_SUNI=y
+# CONFIG_ATM_SOLOS is not set
 CONFIG_FDDI=y
 CONFIG_DEFXX=m
 # CONFIG_DEFXX_MMIO is not set
@@ -1444,7 +1297,6 @@
 CONFIG_HIPPI=y
 CONFIG_ROADRUNNER=m
 # CONFIG_ROADRUNNER_LARGE_RINGS is not set
-CONFIG_PLIP=m
 CONFIG_PPP=m
 CONFIG_PPP_MULTILINK=y
 CONFIG_PPP_FILTER=y
@@ -1462,219 +1314,53 @@
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
 CONFIG_NET_FC=y
-CONFIG_SHAPER=m
 CONFIG_NETCONSOLE=m
+# CONFIG_NETCONSOLE_DYNAMIC is not set
 CONFIG_NETPOLL=y
 # CONFIG_NETPOLL_TRAP is not set
 CONFIG_NET_POLL_CONTROLLER=y
-CONFIG_ISDN=m
-CONFIG_ISDN_I4L=m
-CONFIG_ISDN_PPP=y
-CONFIG_ISDN_PPP_VJ=y
-CONFIG_ISDN_MPP=y
-CONFIG_IPPP_FILTER=y
-CONFIG_ISDN_PPP_BSDCOMP=m
-CONFIG_ISDN_AUDIO=y
-CONFIG_ISDN_TTY_FAX=y
-CONFIG_ISDN_X25=y
-
-#
-# ISDN feature submodules
-#
-# CONFIG_ISDN_DRV_LOOP is not set
-CONFIG_ISDN_DIVERSION=m
-
-#
-# ISDN4Linux hardware drivers
-#
-
-#
-# Passive cards
-#
-CONFIG_ISDN_DRV_HISAX=m
-
-#
-# D-channel protocol features
-#
-CONFIG_HISAX_EURO=y
-CONFIG_DE_AOC=y
-# CONFIG_HISAX_NO_SENDCOMPLETE is not set
-# CONFIG_HISAX_NO_LLC is not set
-# CONFIG_HISAX_NO_KEYPAD is not set
-CONFIG_HISAX_1TR6=y
-CONFIG_HISAX_NI1=y
-CONFIG_HISAX_MAX_CARDS=8
-
-#
-# HiSax supported cards
-#
-CONFIG_HISAX_16_3=y
-CONFIG_HISAX_TELESPCI=y
-CONFIG_HISAX_S0BOX=y
-CONFIG_HISAX_FRITZPCI=y
-CONFIG_HISAX_AVM_A1_PCMCIA=y
-CONFIG_HISAX_ELSA=y
-CONFIG_HISAX_DIEHLDIVA=y
-CONFIG_HISAX_SEDLBAUER=y
-CONFIG_HISAX_NETJET=y
-CONFIG_HISAX_NETJET_U=y
-CONFIG_HISAX_NICCY=y
-CONFIG_HISAX_BKM_A4T=y
-CONFIG_HISAX_SCT_QUADRO=y
-CONFIG_HISAX_GAZEL=y
-CONFIG_HISAX_HFC_PCI=y
-CONFIG_HISAX_W6692=y
-CONFIG_HISAX_HFC_SX=y
-CONFIG_HISAX_ENTERNOW_PCI=y
-# CONFIG_HISAX_DEBUG is not set
-
-#
-# HiSax PCMCIA card service modules
-#
-CONFIG_HISAX_SEDLBAUER_CS=m
-CONFIG_HISAX_ELSA_CS=m
-CONFIG_HISAX_AVM_A1_CS=m
-CONFIG_HISAX_TELES_CS=m
-
-#
-# HiSax sub driver modules
-#
-CONFIG_HISAX_ST5481=m
-CONFIG_HISAX_HFCUSB=m
-CONFIG_HISAX_HFC4S8S=m
-CONFIG_HISAX_FRITZ_PCIPNP=m
-CONFIG_HISAX_HDLC=y
-
-#
-# Active cards
-#
-# CONFIG_HYSDN is not set
-CONFIG_ISDN_DRV_GIGASET=m
-CONFIG_GIGASET_BASE=m
-CONFIG_GIGASET_M105=m
-# CONFIG_GIGASET_M101 is not set
-# CONFIG_GIGASET_DEBUG is not set
-# CONFIG_GIGASET_UNDOCREQ is not set
-CONFIG_ISDN_CAPI=m
-CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
-CONFIG_CAPI_TRACE=y
-CONFIG_ISDN_CAPI_MIDDLEWARE=y
-CONFIG_ISDN_CAPI_CAPI20=m
-CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
-CONFIG_ISDN_CAPI_CAPIFS=m
-CONFIG_ISDN_CAPI_CAPIDRV=m
-
-#
-# CAPI hardware drivers
-#
-CONFIG_CAPI_AVM=y
-CONFIG_ISDN_DRV_AVMB1_B1PCI=m
-CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
-CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
-CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
-CONFIG_ISDN_DRV_AVMB1_T1PCI=m
-CONFIG_ISDN_DRV_AVMB1_C4=m
-CONFIG_CAPI_EICON=y
-CONFIG_ISDN_DIVAS=m
-CONFIG_ISDN_DIVAS_BRIPCI=y
-CONFIG_ISDN_DIVAS_PRIPCI=y
-CONFIG_ISDN_DIVAS_DIVACAPI=m
-CONFIG_ISDN_DIVAS_USERIDI=m
-CONFIG_ISDN_DIVAS_MAINT=m
-CONFIG_PHONE=m
-CONFIG_PHONE_IXJ=m
-CONFIG_PHONE_IXJ_PCMCIA=m
+# CONFIG_VMXNET3 is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
 
 #
 # Input device support
 #
 CONFIG_INPUT=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_INPUT_FF_MEMLESS is not set
 # CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
 
 #
 # Userland interfaces
 #
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-CONFIG_INPUT_JOYDEV=m
-CONFIG_INPUT_TSDEV=m
-CONFIG_INPUT_TSDEV_SCREEN_X=240
-CONFIG_INPUT_TSDEV_SCREEN_Y=320
-CONFIG_INPUT_EVDEV=m
-CONFIG_INPUT_EVBUG=m
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
 
 #
 # Input Device Drivers
 #
 CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
 CONFIG_KEYBOARD_GPIO=y
-CONFIG_KEYBOARD_SUNKBD=m
-CONFIG_KEYBOARD_LKKBD=m
-CONFIG_KEYBOARD_XTKBD=m
-CONFIG_KEYBOARD_NEWTON=m
-CONFIG_KEYBOARD_STOWAWAY=m
-CONFIG_INPUT_MOUSE=y
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_PS2_ALPS=y
-CONFIG_MOUSE_PS2_LOGIPS2PP=y
-CONFIG_MOUSE_PS2_SYNAPTICS=y
-CONFIG_MOUSE_PS2_LIFEBOOK=y
-CONFIG_MOUSE_PS2_TRACKPOINT=y
-# CONFIG_MOUSE_PS2_TOUCHKIT is not set
-CONFIG_MOUSE_SERIAL=m
-# CONFIG_MOUSE_APPLETOUCH is not set
-CONFIG_MOUSE_VSXXXAA=m
-CONFIG_INPUT_JOYSTICK=y
-CONFIG_JOYSTICK_ANALOG=m
-CONFIG_JOYSTICK_A3D=m
-CONFIG_JOYSTICK_ADI=m
-CONFIG_JOYSTICK_COBRA=m
-CONFIG_JOYSTICK_GF2K=m
-CONFIG_JOYSTICK_GRIP=m
-CONFIG_JOYSTICK_GRIP_MP=m
-CONFIG_JOYSTICK_GUILLEMOT=m
-CONFIG_JOYSTICK_INTERACT=m
-CONFIG_JOYSTICK_SIDEWINDER=m
-CONFIG_JOYSTICK_TMDC=m
-CONFIG_JOYSTICK_IFORCE=m
-CONFIG_JOYSTICK_IFORCE_USB=y
-CONFIG_JOYSTICK_IFORCE_232=y
-CONFIG_JOYSTICK_WARRIOR=m
-CONFIG_JOYSTICK_MAGELLAN=m
-CONFIG_JOYSTICK_SPACEORB=m
-CONFIG_JOYSTICK_SPACEBALL=m
-CONFIG_JOYSTICK_STINGER=m
-CONFIG_JOYSTICK_TWIDJOY=m
-CONFIG_JOYSTICK_DB9=m
-CONFIG_JOYSTICK_GAMECON=m
-CONFIG_JOYSTICK_TURBOGRAFX=m
-CONFIG_JOYSTICK_JOYDUMP=m
-# CONFIG_JOYSTICK_XPAD is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
 # CONFIG_INPUT_TABLET is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ADS7846=m
-# CONFIG_TOUCHSCREEN_FUJITSU is not set
-CONFIG_TOUCHSCREEN_GUNZE=m
-CONFIG_TOUCHSCREEN_ELO=m
-CONFIG_TOUCHSCREEN_MTOUCH=m
-CONFIG_TOUCHSCREEN_MK712=m
-CONFIG_TOUCHSCREEN_PENMOUNT=m
-CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
-CONFIG_TOUCHSCREEN_TOUCHWIN=m
-# CONFIG_TOUCHSCREEN_UCB1400 is not set
-# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_PCSPKR=m
-# CONFIG_INPUT_ATI_REMOTE is not set
-# CONFIG_INPUT_ATI_REMOTE2 is not set
-# CONFIG_INPUT_KEYSPAN_REMOTE is not set
-# CONFIG_INPUT_POWERMATE is not set
-# CONFIG_INPUT_YEALINK is not set
-CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
 
 #
 # Hardware I/O ports
@@ -1682,10 +1368,10 @@
 CONFIG_SERIO=y
 CONFIG_SERIO_I8042=y
 CONFIG_SERIO_SERPORT=m
-CONFIG_SERIO_PARKBD=m
 CONFIG_SERIO_PCIPS2=m
 CONFIG_SERIO_LIBPS2=y
 CONFIG_SERIO_RAW=m
+# CONFIG_SERIO_ALTERA_PS2 is not set
 CONFIG_GAMEPORT=m
 CONFIG_GAMEPORT_NS558=m
 CONFIG_GAMEPORT_L4=m
@@ -1696,30 +1382,13 @@
 # Character devices
 #
 CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
 CONFIG_VT_CONSOLE=y
 CONFIG_HW_CONSOLE=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_SERIAL_NONSTANDARD=y
-# CONFIG_COMPUTONE is not set
-CONFIG_ROCKETPORT=m
-CONFIG_CYCLADES=m
-# CONFIG_CYZ_INTR is not set
-CONFIG_DIGIEPCA=m
-# CONFIG_MOXA_INTELLIO is not set
-CONFIG_MOXA_SMARTIO=m
-# CONFIG_MOXA_SMARTIO_NEW is not set
-# CONFIG_ISI is not set
-CONFIG_SYNCLINKMP=m
-CONFIG_SYNCLINK_GT=m
-CONFIG_N_HDLC=m
-# CONFIG_RISCOM8 is not set
-CONFIG_SPECIALIX=m
-# CONFIG_SPECIALIX_RTSCTS is not set
-CONFIG_SX=m
-# CONFIG_RIO is not set
-CONFIG_STALDRV=y
-# CONFIG_STALLION is not set
-# CONFIG_ISTALLION is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
 
 #
 # Serial drivers
@@ -1741,161 +1410,128 @@
 #
 CONFIG_SERIAL_CORE=m
 CONFIG_SERIAL_JSM=m
+# CONFIG_SERIAL_TIMBERDALE is not set
 CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
-CONFIG_PRINTER=m
-# CONFIG_LP_CONSOLE is not set
-CONFIG_PPDEV=m
-CONFIG_TIPAR=m
-CONFIG_IPMI_HANDLER=m
-# CONFIG_IPMI_PANIC_EVENT is not set
-CONFIG_IPMI_DEVICE_INTERFACE=m
-CONFIG_IPMI_SI=m
-CONFIG_IPMI_WATCHDOG=m
-CONFIG_IPMI_POWEROFF=m
-CONFIG_WATCHDOG=y
-# CONFIG_WATCHDOG_NOWAYOUT is not set
-
-#
-# Watchdog Device Drivers
-#
-CONFIG_SOFT_WATCHDOG=m
-# CONFIG_WDT_MTX1 is not set
-
-#
-# PCI-based Watchdog Cards
-#
-CONFIG_PCIPCWATCHDOG=m
-CONFIG_WDTPCI=m
-CONFIG_WDT_501_PCI=y
-
-#
-# USB-based Watchdog Cards
-#
-CONFIG_USBPCWATCHDOG=m
+# CONFIG_IPMI_HANDLER is not set
 CONFIG_HW_RANDOM=y
-CONFIG_RTC=y
-CONFIG_R3964=m
-CONFIG_APPLICOM=m
-CONFIG_DRM=m
-CONFIG_DRM_TDFX=m
-CONFIG_DRM_R128=m
-CONFIG_DRM_RADEON=m
-CONFIG_DRM_MGA=m
-CONFIG_DRM_VIA=m
-CONFIG_DRM_SAVAGE=m
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
 
 #
 # PCMCIA character devices
 #
-CONFIG_SYNCLINK_CS=m
-CONFIG_CARDMAN_4000=m
-CONFIG_CARDMAN_4040=m
-CONFIG_RAW_DRIVER=m
-CONFIG_MAX_RAW_DEVS=256
-CONFIG_TCG_TPM=m
-CONFIG_TCG_ATMEL=m
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_IPWIRELESS is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
 CONFIG_DEVPORT=y
 CONFIG_I2C=m
 CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
 CONFIG_I2C_CHARDEV=m
-
-#
-# I2C Algorithms
-#
+CONFIG_I2C_HELPER_AUTO=y
 CONFIG_I2C_ALGOBIT=m
-CONFIG_I2C_ALGOPCF=m
-CONFIG_I2C_ALGOPCA=m
 
 #
 # I2C Hardware Bus support
 #
-CONFIG_I2C_ALI1535=m
-CONFIG_I2C_ALI1563=m
-CONFIG_I2C_ALI15X3=m
-CONFIG_I2C_AMD756=m
-CONFIG_I2C_AMD756_S4882=m
-CONFIG_I2C_AMD8111=m
-CONFIG_I2C_I801=m
-CONFIG_I2C_I810=m
-CONFIG_I2C_PIIX4=m
-CONFIG_I2C_NFORCE2=m
-CONFIG_I2C_OCORES=m
-CONFIG_I2C_PARPORT=m
-CONFIG_I2C_PARPORT_LIGHT=m
-CONFIG_I2C_PROSAVAGE=m
-CONFIG_I2C_SAVAGE4=m
-# CONFIG_I2C_SIMTEC is not set
-CONFIG_I2C_SIS5595=m
-CONFIG_I2C_SIS630=m
-CONFIG_I2C_SIS96X=m
-# CONFIG_I2C_TAOS_EVM is not set
-CONFIG_I2C_STUB=m
-# CONFIG_I2C_TINY_USB is not set
-CONFIG_I2C_VIA=m
-CONFIG_I2C_VIAPRO=m
-CONFIG_I2C_VOODOO3=m
 
 #
-# Miscellaneous I2C Chip support
+# PC SMBus host controller drivers
 #
-CONFIG_SENSORS_DS1337=m
-CONFIG_SENSORS_DS1374=m
-# CONFIG_DS1682 is not set
-CONFIG_EEPROM_LEGACY=m
-CONFIG_SENSORS_PCF8574=m
-CONFIG_SENSORS_PCA9539=m
-CONFIG_SENSORS_PCF8591=m
-CONFIG_EEPROM_MAX6875=m
-# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+CONFIG_I2C_GPIO=m
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
 # CONFIG_I2C_DEBUG_CORE is not set
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
 
 #
-# SPI support
+# PPS support
 #
-CONFIG_SPI=y
-CONFIG_SPI_MASTER=y
+# CONFIG_PPS is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
 
 #
-# SPI Master Controller Drivers
+# Memory mapped GPIO expanders:
 #
-CONFIG_SPI_BITBANG=m
-CONFIG_SPI_BUTTERFLY=m
-# CONFIG_SPI_LM70_LLP is not set
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_SCH is not set
 
 #
-# SPI Protocol Masters
+# I2C GPIO expanders:
 #
-# CONFIG_EEPROM_AT25 is not set
-# CONFIG_SPI_SPIDEV is not set
-# CONFIG_SPI_TLE62X0 is not set
-CONFIG_W1=m
-CONFIG_W1_CON=y
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
 
 #
-# 1-wire Bus Masters
+# PCI GPIO expanders:
 #
-CONFIG_W1_MASTER_MATROX=m
-CONFIG_W1_MASTER_DS2490=m
-CONFIG_W1_MASTER_DS2482=m
+# CONFIG_GPIO_CS5535 is not set
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_LANGWELL is not set
 
 #
-# 1-wire Slaves
+# SPI GPIO expanders:
 #
-CONFIG_W1_SLAVE_THERM=m
-CONFIG_W1_SLAVE_SMEM=m
-CONFIG_W1_SLAVE_DS2433=m
-# CONFIG_W1_SLAVE_DS2433_CRC is not set
-# CONFIG_W1_SLAVE_DS2760 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
 CONFIG_HWMON=y
 CONFIG_HWMON_VID=m
-CONFIG_SENSORS_ABITUGURU=m
-# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7414 is not set
 # CONFIG_SENSORS_AD7418 is not set
 CONFIG_SENSORS_ADM1021=m
 CONFIG_SENSORS_ADM1025=m
@@ -1903,17 +1539,23 @@
 # CONFIG_SENSORS_ADM1029 is not set
 CONFIG_SENSORS_ADM1031=m
 CONFIG_SENSORS_ADM9240=m
-CONFIG_SENSORS_ASB100=m
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
 CONFIG_SENSORS_ATXP1=m
 CONFIG_SENSORS_DS1621=m
+# CONFIG_SENSORS_I5K_AMB is not set
 CONFIG_SENSORS_F71805F=m
-CONFIG_SENSORS_FSCHER=m
-CONFIG_SENSORS_FSCPOS=m
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
 CONFIG_SENSORS_GL518SM=m
 CONFIG_SENSORS_GL520SM=m
 CONFIG_SENSORS_IT87=m
 CONFIG_SENSORS_LM63=m
-CONFIG_SENSORS_LM70=m
+# CONFIG_SENSORS_LM73 is not set
 CONFIG_SENSORS_LM75=m
 CONFIG_SENSORS_LM77=m
 CONFIG_SENSORS_LM78=m
@@ -1924,16 +1566,25 @@
 CONFIG_SENSORS_LM90=m
 CONFIG_SENSORS_LM92=m
 # CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
 CONFIG_SENSORS_MAX1619=m
 # CONFIG_SENSORS_MAX6650 is not set
 CONFIG_SENSORS_PC87360=m
 # CONFIG_SENSORS_PC87427 is not set
+CONFIG_SENSORS_PCF8591=m
+# CONFIG_SENSORS_SHT15 is not set
 CONFIG_SENSORS_SIS5595=m
 # CONFIG_SENSORS_DME1737 is not set
 CONFIG_SENSORS_SMSC47M1=m
 CONFIG_SENSORS_SMSC47M192=m
 CONFIG_SENSORS_SMSC47B397=m
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_AMC6821 is not set
 # CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
 CONFIG_SENSORS_VIA686A=m
 CONFIG_SENSORS_VT1211=m
 CONFIG_SENSORS_VT8231=m
@@ -1942,370 +1593,94 @@
 CONFIG_SENSORS_W83792D=m
 # CONFIG_SENSORS_W83793 is not set
 CONFIG_SENSORS_W83L785TS=m
+# CONFIG_SENSORS_W83L786NG is not set
 CONFIG_SENSORS_W83627HF=m
 CONFIG_SENSORS_W83627EHF=m
-# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ALIM7101_WDT is not set
+CONFIG_WDT_MTX1=y
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB=m
+CONFIG_SSB_SPROM=y
+CONFIG_SSB_PCIHOST_POSSIBLE=y
+CONFIG_SSB_PCIHOST=y
+# CONFIG_SSB_B43_PCI_BRIDGE is not set
+CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
+# CONFIG_SSB_PCMCIAHOST is not set
+CONFIG_SSB_SDIOHOST_POSSIBLE=y
+# CONFIG_SSB_SDIOHOST is not set
+# CONFIG_SSB_SILENT is not set
+# CONFIG_SSB_DEBUG is not set
+CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
+CONFIG_SSB_DRIVER_PCICORE=y
+# CONFIG_SSB_DRIVER_MIPS is not set
 
 #
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
-
-#
-# Multimedia devices
-#
-CONFIG_VIDEO_DEV=m
-CONFIG_VIDEO_V4L1=y
-CONFIG_VIDEO_V4L1_COMPAT=y
-CONFIG_VIDEO_V4L2=y
-CONFIG_VIDEO_CAPTURE_DRIVERS=y
-# CONFIG_VIDEO_ADV_DEBUG is not set
-CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
-CONFIG_VIDEO_TVAUDIO=m
-CONFIG_VIDEO_TDA7432=m
-CONFIG_VIDEO_TDA9840=m
-CONFIG_VIDEO_TDA9875=m
-CONFIG_VIDEO_TEA6415C=m
-CONFIG_VIDEO_TEA6420=m
-CONFIG_VIDEO_MSP3400=m
-CONFIG_VIDEO_WM8775=m
-CONFIG_VIDEO_BT819=m
-CONFIG_VIDEO_BT856=m
-CONFIG_VIDEO_KS0127=m
-CONFIG_VIDEO_SAA7110=m
-CONFIG_VIDEO_SAA7111=m
-CONFIG_VIDEO_SAA7114=m
-CONFIG_VIDEO_SAA711X=m
-CONFIG_VIDEO_TVP5150=m
-CONFIG_VIDEO_VPX3220=m
-CONFIG_VIDEO_CX25840=m
-CONFIG_VIDEO_CX2341X=m
-CONFIG_VIDEO_SAA7185=m
-CONFIG_VIDEO_ADV7170=m
-CONFIG_VIDEO_ADV7175=m
-CONFIG_VIDEO_VIVI=m
-CONFIG_VIDEO_BT848=m
-CONFIG_VIDEO_BT848_DVB=y
-CONFIG_VIDEO_SAA6588=m
-CONFIG_VIDEO_BWQCAM=m
-CONFIG_VIDEO_CQCAM=m
-CONFIG_VIDEO_W9966=m
-CONFIG_VIDEO_CPIA=m
-CONFIG_VIDEO_CPIA_PP=m
-CONFIG_VIDEO_CPIA_USB=m
-CONFIG_VIDEO_CPIA2=m
-CONFIG_VIDEO_SAA5246A=m
-CONFIG_VIDEO_SAA5249=m
-CONFIG_TUNER_3036=m
-# CONFIG_TUNER_TEA5761 is not set
-CONFIG_VIDEO_STRADIS=m
-CONFIG_VIDEO_ZORAN_ZR36060=m
-CONFIG_VIDEO_ZORAN=m
-CONFIG_VIDEO_ZORAN_BUZ=m
-CONFIG_VIDEO_ZORAN_DC10=m
-CONFIG_VIDEO_ZORAN_DC30=m
-CONFIG_VIDEO_ZORAN_LML33=m
-CONFIG_VIDEO_ZORAN_LML33R10=m
-CONFIG_VIDEO_ZORAN_AVS6EYES=m
-CONFIG_VIDEO_SAA7134=m
-CONFIG_VIDEO_SAA7134_ALSA=m
-CONFIG_VIDEO_SAA7134_OSS=m
-CONFIG_VIDEO_SAA7134_DVB=m
-CONFIG_VIDEO_MXB=m
-CONFIG_VIDEO_DPC=m
-CONFIG_VIDEO_HEXIUM_ORION=m
-CONFIG_VIDEO_HEXIUM_GEMINI=m
-CONFIG_VIDEO_CX88=m
-CONFIG_VIDEO_CX88_ALSA=m
-CONFIG_VIDEO_CX88_BLACKBIRD=m
-CONFIG_VIDEO_CX88_DVB=m
-CONFIG_VIDEO_CX88_VP3054=m
-# CONFIG_VIDEO_IVTV is not set
-# CONFIG_VIDEO_CAFE_CCIC is not set
-CONFIG_V4L_USB_DRIVERS=y
-CONFIG_VIDEO_PVRUSB2=m
-CONFIG_VIDEO_PVRUSB2_29XXX=y
-CONFIG_VIDEO_PVRUSB2_24XXX=y
-CONFIG_VIDEO_PVRUSB2_SYSFS=y
-# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
-CONFIG_VIDEO_EM28XX=m
-# CONFIG_VIDEO_USBVISION is not set
-CONFIG_VIDEO_USBVIDEO=m
-CONFIG_USB_VICAM=m
-CONFIG_USB_IBMCAM=m
-CONFIG_USB_KONICAWC=m
-CONFIG_USB_QUICKCAM_MESSENGER=m
-CONFIG_USB_ET61X251=m
-CONFIG_VIDEO_OVCAMCHIP=m
-CONFIG_USB_W9968CF=m
-# CONFIG_USB_OV511 is not set
-CONFIG_USB_SE401=m
-CONFIG_USB_SN9C102=m
-CONFIG_USB_STV680=m
-CONFIG_USB_ZC0301=m
-CONFIG_USB_PWC=m
-# CONFIG_USB_PWC_DEBUG is not set
-# CONFIG_USB_ZR364XX is not set
-CONFIG_RADIO_ADAPTERS=y
-CONFIG_RADIO_GEMTEK_PCI=m
-CONFIG_RADIO_MAXIRADIO=m
-CONFIG_RADIO_MAESTRO=m
-CONFIG_USB_DSBR=m
-CONFIG_DVB_CORE=m
-CONFIG_DVB_CORE_ATTACH=y
-CONFIG_DVB_CAPTURE_DRIVERS=y
-
-#
-# Supported SAA7146 based PCI Adapters
-#
-CONFIG_DVB_AV7110=m
-CONFIG_DVB_AV7110_OSD=y
-CONFIG_DVB_BUDGET=m
-CONFIG_DVB_BUDGET_CI=m
-CONFIG_DVB_BUDGET_AV=m
-CONFIG_DVB_BUDGET_PATCH=m
-
-#
-# Supported USB Adapters
-#
-CONFIG_DVB_USB=m
-# CONFIG_DVB_USB_DEBUG is not set
-CONFIG_DVB_USB_A800=m
-CONFIG_DVB_USB_DIBUSB_MB=m
-CONFIG_DVB_USB_DIBUSB_MB_FAULTY=y
-CONFIG_DVB_USB_DIBUSB_MC=m
-CONFIG_DVB_USB_DIB0700=m
-CONFIG_DVB_USB_UMT_010=m
-CONFIG_DVB_USB_CXUSB=m
-# CONFIG_DVB_USB_M920X is not set
-# CONFIG_DVB_USB_GL861 is not set
-# CONFIG_DVB_USB_AU6610 is not set
-CONFIG_DVB_USB_DIGITV=m
-CONFIG_DVB_USB_VP7045=m
-CONFIG_DVB_USB_VP702X=m
-CONFIG_DVB_USB_GP8PSK=m
-CONFIG_DVB_USB_NOVA_T_USB2=m
-# CONFIG_DVB_USB_TTUSB2 is not set
-CONFIG_DVB_USB_DTT200U=m
-# CONFIG_DVB_USB_OPERA1 is not set
-# CONFIG_DVB_USB_AF9005 is not set
-CONFIG_DVB_TTUSB_BUDGET=m
-CONFIG_DVB_TTUSB_DEC=m
-CONFIG_DVB_CINERGYT2=m
-CONFIG_DVB_CINERGYT2_TUNING=y
-CONFIG_DVB_CINERGYT2_STREAM_URB_COUNT=32
-CONFIG_DVB_CINERGYT2_STREAM_BUF_SIZE=512
-CONFIG_DVB_CINERGYT2_QUERY_INTERVAL=250
-CONFIG_DVB_CINERGYT2_ENABLE_RC_INPUT_DEVICE=y
-CONFIG_DVB_CINERGYT2_RC_QUERY_INTERVAL=100
-
-#
-# Supported FlexCopII (B2C2) Adapters
-#
-CONFIG_DVB_B2C2_FLEXCOP=m
-CONFIG_DVB_B2C2_FLEXCOP_PCI=m
-CONFIG_DVB_B2C2_FLEXCOP_USB=m
-# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
-
-#
-# Supported BT878 Adapters
-#
-CONFIG_DVB_BT8XX=m
-
-#
-# Supported Pluto2 Adapters
-#
-CONFIG_DVB_PLUTO2=m
-
-#
-# Supported DVB Frontends
-#
-
-#
-# Customise DVB Frontends
-#
-# CONFIG_DVB_FE_CUSTOMISE is not set
-
-#
-# DVB-S (satellite) frontends
-#
-CONFIG_DVB_STV0299=m
-CONFIG_DVB_CX24110=m
-CONFIG_DVB_CX24123=m
-CONFIG_DVB_TDA8083=m
-CONFIG_DVB_MT312=m
-CONFIG_DVB_VES1X93=m
-CONFIG_DVB_S5H1420=m
-CONFIG_DVB_TDA10086=m
-
-#
-# DVB-T (terrestrial) frontends
-#
-CONFIG_DVB_SP8870=m
-CONFIG_DVB_SP887X=m
-CONFIG_DVB_CX22700=m
-CONFIG_DVB_CX22702=m
-CONFIG_DVB_L64781=m
-CONFIG_DVB_TDA1004X=m
-CONFIG_DVB_NXT6000=m
-CONFIG_DVB_MT352=m
-CONFIG_DVB_ZL10353=m
-CONFIG_DVB_DIB3000MB=m
-CONFIG_DVB_DIB3000MC=m
-CONFIG_DVB_DIB7000M=m
-CONFIG_DVB_DIB7000P=m
-
-#
-# DVB-C (cable) frontends
-#
-CONFIG_DVB_VES1820=m
-CONFIG_DVB_TDA10021=m
-CONFIG_DVB_TDA10023=m
-CONFIG_DVB_STV0297=m
-
-#
-# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
-#
-CONFIG_DVB_NXT200X=m
-CONFIG_DVB_OR51211=m
-CONFIG_DVB_OR51132=m
-CONFIG_DVB_BCM3510=m
-CONFIG_DVB_LGDT330X=m
-
-#
-# Tuners/PLL support
-#
-CONFIG_DVB_PLL=m
-CONFIG_DVB_TDA826X=m
-CONFIG_DVB_TDA827X=m
-# CONFIG_DVB_TUNER_QT1010 is not set
-CONFIG_DVB_TUNER_MT2060=m
-
-#
-# Miscellaneous devices
-#
-CONFIG_DVB_LNBP21=m
-CONFIG_DVB_ISL6421=m
-CONFIG_DVB_TUA6100=m
-CONFIG_VIDEO_SAA7146=m
-CONFIG_VIDEO_SAA7146_VV=m
-CONFIG_VIDEO_TUNER=m
-CONFIG_VIDEO_BUF=m
-CONFIG_VIDEO_BUF_DVB=m
-CONFIG_VIDEO_BTCX=m
-CONFIG_VIDEO_IR_I2C=m
-CONFIG_VIDEO_IR=m
-CONFIG_VIDEO_TVEEPROM=m
-CONFIG_DAB=y
-CONFIG_USB_DABUSB=m
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
 
 #
 # Graphics support
 #
+# CONFIG_VGA_ARB is not set
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=m
+# CONFIG_LCD_CLASS_DEVICE is not set
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
 
 #
 # Display device support
 #
 # CONFIG_DISPLAY_SUPPORT is not set
-CONFIG_VGASTATE=m
-CONFIG_VIDEO_OUTPUT_CONTROL=m
-CONFIG_FB=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_DDC=m
-CONFIG_FB_CFB_FILLRECT=m
-CONFIG_FB_CFB_COPYAREA=m
-CONFIG_FB_CFB_IMAGEBLIT=m
-# CONFIG_FB_SYS_FILLRECT is not set
-# CONFIG_FB_SYS_COPYAREA is not set
-# CONFIG_FB_SYS_IMAGEBLIT is not set
-# CONFIG_FB_SYS_FOPS is not set
-CONFIG_FB_DEFERRED_IO=y
-# CONFIG_FB_SVGALIB is not set
-# CONFIG_FB_MACMODES is not set
-CONFIG_FB_BACKLIGHT=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-
-#
-# Frame buffer hardware drivers
-#
-CONFIG_FB_CIRRUS=m
-CONFIG_FB_PM2=m
-CONFIG_FB_PM2_FIFO_DISCONNECT=y
-CONFIG_FB_CYBER2000=m
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_IMSTT is not set
-CONFIG_FB_S1D13XXX=m
-CONFIG_FB_NVIDIA=m
-CONFIG_FB_NVIDIA_I2C=y
-# CONFIG_FB_NVIDIA_DEBUG is not set
-CONFIG_FB_NVIDIA_BACKLIGHT=y
-CONFIG_FB_RIVA=m
-CONFIG_FB_RIVA_I2C=y
-# CONFIG_FB_RIVA_DEBUG is not set
-CONFIG_FB_RIVA_BACKLIGHT=y
-CONFIG_FB_MATROX=m
-CONFIG_FB_MATROX_MILLENIUM=y
-CONFIG_FB_MATROX_MYSTIQUE=y
-CONFIG_FB_MATROX_G=y
-CONFIG_FB_MATROX_I2C=m
-CONFIG_FB_MATROX_MAVEN=m
-CONFIG_FB_MATROX_MULTIHEAD=y
-CONFIG_FB_RADEON=m
-CONFIG_FB_RADEON_I2C=y
-CONFIG_FB_RADEON_BACKLIGHT=y
-# CONFIG_FB_RADEON_DEBUG is not set
-CONFIG_FB_ATY128=m
-CONFIG_FB_ATY128_BACKLIGHT=y
-CONFIG_FB_ATY=m
-CONFIG_FB_ATY_CT=y
-CONFIG_FB_ATY_GENERIC_LCD=y
-CONFIG_FB_ATY_GX=y
-CONFIG_FB_ATY_BACKLIGHT=y
-# CONFIG_FB_S3 is not set
-CONFIG_FB_SAVAGE=m
-CONFIG_FB_SAVAGE_I2C=y
-CONFIG_FB_SAVAGE_ACCEL=y
-CONFIG_FB_SIS=m
-CONFIG_FB_SIS_300=y
-CONFIG_FB_SIS_315=y
-CONFIG_FB_NEOMAGIC=m
-CONFIG_FB_KYRO=m
-CONFIG_FB_3DFX=m
-# CONFIG_FB_3DFX_ACCEL is not set
-CONFIG_FB_VOODOO1=m
-# CONFIG_FB_VT8623 is not set
-CONFIG_FB_TRIDENT=m
-# CONFIG_FB_TRIDENT_ACCEL is not set
-# CONFIG_FB_ARK is not set
-# CONFIG_FB_PM3 is not set
-# CONFIG_FB_VIRTUAL is not set
 
 #
 # Console display driver support
 #
-CONFIG_VGA_CONSOLE=y
-# CONFIG_VGACON_SOFT_SCROLLBACK is not set
+# CONFIG_VGA_CONSOLE is not set
 CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=m
-# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
-# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
-# CONFIG_FONTS is not set
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-# CONFIG_LOGO is not set
-
-#
-# Sound
-#
 CONFIG_SOUND=m
-
-#
-# Advanced Linux Sound Architecture
-#
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
 CONFIG_SND=m
 CONFIG_SND_TIMER=m
 CONFIG_SND_PCM=m
@@ -2318,32 +1693,29 @@
 CONFIG_SND_PCM_OSS=m
 CONFIG_SND_PCM_OSS_PLUGINS=y
 CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_RTCTIMER=m
-CONFIG_SND_SEQ_RTCTIMER_DEFAULT=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_SUPPORT_OLD_API=y
 CONFIG_SND_VERBOSE_PROCFS=y
 # CONFIG_SND_VERBOSE_PRINTK is not set
 # CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_VMASTER=y
+CONFIG_SND_RAWMIDI_SEQ=m
+CONFIG_SND_OPL3_LIB_SEQ=m
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+CONFIG_SND_EMU10K1_SEQ=m
 CONFIG_SND_MPU401_UART=m
 CONFIG_SND_OPL3_LIB=m
 CONFIG_SND_VX_LIB=m
 CONFIG_SND_AC97_CODEC=m
+CONFIG_SND_DRIVERS=y
 CONFIG_SND_DUMMY=m
 CONFIG_SND_VIRMIDI=m
 CONFIG_SND_MTPAV=m
-CONFIG_SND_MTS64=m
 CONFIG_SND_SERIAL_U16550=m
 CONFIG_SND_MPU401=m
-# CONFIG_SND_PORTMAN2X4 is not set
-
-#
-# PCI devices
-#
+# CONFIG_SND_AC97_POWER_SAVE is not set
+CONFIG_SND_PCI=y
 CONFIG_SND_AD1889=m
 CONFIG_SND_ALS300=m
 CONFIG_SND_ALI5451=m
@@ -2352,14 +1724,18 @@
 CONFIG_SND_AU8810=m
 CONFIG_SND_AU8820=m
 CONFIG_SND_AU8830=m
+# CONFIG_SND_AW2 is not set
 CONFIG_SND_AZT3328=m
 CONFIG_SND_BT87X=m
 # CONFIG_SND_BT87X_OVERCLOCK is not set
 CONFIG_SND_CA0106=m
 CONFIG_SND_CMIPCI=m
+# CONFIG_SND_OXYGEN is not set
 CONFIG_SND_CS4281=m
 CONFIG_SND_CS46XX=m
 CONFIG_SND_CS46XX_NEW_DSP=y
+# CONFIG_SND_CS5535AUDIO is not set
+# CONFIG_SND_CTXFI is not set
 CONFIG_SND_DARLA20=m
 CONFIG_SND_GINA20=m
 CONFIG_SND_LAYLA20=m
@@ -2372,6 +1748,8 @@
 CONFIG_SND_INDIGO=m
 CONFIG_SND_INDIGOIO=m
 CONFIG_SND_INDIGODJ=m
+# CONFIG_SND_INDIGOIOX is not set
+# CONFIG_SND_INDIGODJX is not set
 CONFIG_SND_EMU10K1=m
 CONFIG_SND_EMU10K1X=m
 CONFIG_SND_ENS1370=m
@@ -2379,19 +1757,36 @@
 CONFIG_SND_ES1938=m
 CONFIG_SND_ES1968=m
 CONFIG_SND_FM801=m
-CONFIG_SND_FM801_TEA575X_BOOL=y
-CONFIG_SND_FM801_TEA575X=m
 CONFIG_SND_HDA_INTEL=m
+# CONFIG_SND_HDA_HWDEP is not set
+# CONFIG_SND_HDA_INPUT_BEEP is not set
+# CONFIG_SND_HDA_INPUT_JACK is not set
+# CONFIG_SND_HDA_PATCH_LOADER is not set
+CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_ANALOG=y
+CONFIG_SND_HDA_CODEC_SIGMATEL=y
+CONFIG_SND_HDA_CODEC_VIA=y
+CONFIG_SND_HDA_CODEC_ATIHDMI=y
+CONFIG_SND_HDA_CODEC_NVHDMI=y
+CONFIG_SND_HDA_CODEC_INTELHDMI=y
+CONFIG_SND_HDA_ELD=y
+CONFIG_SND_HDA_CODEC_CIRRUS=y
+CONFIG_SND_HDA_CODEC_CONEXANT=y
+CONFIG_SND_HDA_CODEC_CA0110=y
+CONFIG_SND_HDA_CODEC_CMEDIA=y
+CONFIG_SND_HDA_CODEC_SI3054=y
+CONFIG_SND_HDA_GENERIC=y
+# CONFIG_SND_HDA_POWER_SAVE is not set
 CONFIG_SND_HDSP=m
 CONFIG_SND_HDSPM=m
+# CONFIG_SND_HIFIER is not set
 CONFIG_SND_ICE1712=m
 CONFIG_SND_ICE1724=m
 CONFIG_SND_INTEL8X0=m
 CONFIG_SND_INTEL8X0M=m
 CONFIG_SND_KORG1212=m
-CONFIG_SND_KORG1212_FIRMWARE_IN_KERNEL=y
+# CONFIG_SND_LX6464ES is not set
 CONFIG_SND_MAESTRO3=m
-CONFIG_SND_MAESTRO3_FIRMWARE_IN_KERNEL=y
 CONFIG_SND_MIXART=m
 CONFIG_SND_NM256=m
 CONFIG_SND_PCXHR=m
@@ -2403,55 +1798,30 @@
 CONFIG_SND_TRIDENT=m
 CONFIG_SND_VIA82XX=m
 CONFIG_SND_VIA82XX_MODEM=m
+# CONFIG_SND_VIRTUOSO is not set
 CONFIG_SND_VX222=m
 CONFIG_SND_YMFPCI=m
-CONFIG_SND_YMFPCI_FIRMWARE_IN_KERNEL=y
-# CONFIG_SND_AC97_POWER_SAVE is not set
-
-#
-# ALSA MIPS devices
-#
+CONFIG_SND_MIPS=y
 # CONFIG_SND_AU1X00 is not set
-
-#
-# USB devices
-#
+CONFIG_SND_USB=y
 CONFIG_SND_USB_AUDIO=m
+# CONFIG_SND_USB_UA101 is not set
 # CONFIG_SND_USB_CAIAQ is not set
-
-#
-# PCMCIA devices
-#
+CONFIG_SND_PCMCIA=y
 CONFIG_SND_VXPOCKET=m
 CONFIG_SND_PDAUDIOCF=m
-
-#
-# System on Chip audio support
-#
 # CONFIG_SND_SOC is not set
-
-#
-# SoC Audio support for SuperH
-#
-
-#
-# Open Sound System
-#
 CONFIG_SOUND_PRIME=m
-CONFIG_SOUND_TRIDENT=m
-# CONFIG_SOUND_MSNDCLAS is not set
-# CONFIG_SOUND_MSNDPIN is not set
 CONFIG_AC97_BUS=m
 CONFIG_HID_SUPPORT=y
 CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
+# CONFIG_HIDRAW is not set
 
 #
 # USB Input Devices
 #
 CONFIG_USB_HID=m
-CONFIG_USB_HIDINPUT_POWERBOOK=y
-# CONFIG_HID_FF is not set
+# CONFIG_HID_PID is not set
 CONFIG_USB_HIDDEV=y
 
 #
@@ -2459,12 +1829,50 @@
 #
 CONFIG_USB_KBD=m
 CONFIG_USB_MOUSE=m
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_3M_PCT is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_ZEROPLUS is not set
 CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
 CONFIG_USB_ARCH_HAS_OHCI=y
 CONFIG_USB_ARCH_HAS_EHCI=y
 CONFIG_USB=m
 # CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
 
 #
 # Miscellaneous USB options
@@ -2472,19 +1880,27 @@
 CONFIG_USB_DEVICEFS=y
 CONFIG_USB_DEVICE_CLASS=y
 # CONFIG_USB_DYNAMIC_MINORS is not set
-CONFIG_USB_SUSPEND=y
-# CONFIG_USB_PERSIST is not set
 # CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=m
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
 
 #
 # USB Host Controller Drivers
 #
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_XHCI_HCD is not set
 CONFIG_USB_EHCI_HCD=m
-CONFIG_USB_EHCI_SPLIT_ISO=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_EHCI_TT_NEWSCHED=y
+# CONFIG_USB_OXU210HP_HCD is not set
 # CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
 CONFIG_USB_OHCI_HCD=m
+# CONFIG_USB_OHCI_HCD_SSB is not set
 # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
 # CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
 CONFIG_USB_OHCI_LITTLE_ENDIAN=y
@@ -2493,32 +1909,38 @@
 CONFIG_USB_SL811_HCD=m
 CONFIG_USB_SL811_CS=m
 # CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_WHCI_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
 
 #
 # USB Device Class drivers
 #
 CONFIG_USB_ACM=m
 CONFIG_USB_PRINTER=m
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
 
 #
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
 #
 
 #
-# may also be needed; see USB_STORAGE Help for more information
+# also be needed; see USB_STORAGE Help for more info
 #
 CONFIG_USB_STORAGE=m
 # CONFIG_USB_STORAGE_DEBUG is not set
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_ISD200=y
-CONFIG_USB_STORAGE_DPCM=y
-CONFIG_USB_STORAGE_USBAT=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_USB_STORAGE_ALAUDA=y
-CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+CONFIG_USB_STORAGE_KARMA=m
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
 CONFIG_USB_LIBUSUAL=y
 
 #
@@ -2526,25 +1948,20 @@
 #
 CONFIG_USB_MDC800=m
 CONFIG_USB_MICROTEK=m
-CONFIG_USB_MON=y
 
 #
 # USB port drivers
 #
-CONFIG_USB_USS720=m
-
-#
-# USB Serial Converter support
-#
 CONFIG_USB_SERIAL=m
+CONFIG_USB_EZUSB=y
 CONFIG_USB_SERIAL_GENERIC=y
 CONFIG_USB_SERIAL_AIRCABLE=m
-CONFIG_USB_SERIAL_AIRPRIME=m
 CONFIG_USB_SERIAL_ARK3116=m
 CONFIG_USB_SERIAL_BELKIN=m
+# CONFIG_USB_SERIAL_CH341 is not set
 CONFIG_USB_SERIAL_WHITEHEAT=m
 CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
-CONFIG_USB_SERIAL_CP2101=m
+# CONFIG_USB_SERIAL_CP210X is not set
 CONFIG_USB_SERIAL_CYPRESS_M8=m
 CONFIG_USB_SERIAL_EMPEG=m
 CONFIG_USB_SERIAL_FTDI_SIO=m
@@ -2556,6 +1973,7 @@
 CONFIG_USB_SERIAL_EDGEPORT_TI=m
 CONFIG_USB_SERIAL_GARMIN=m
 CONFIG_USB_SERIAL_IPW=m
+# CONFIG_USB_SERIAL_IUU is not set
 CONFIG_USB_SERIAL_KEYSPAN_PDA=m
 CONFIG_USB_SERIAL_KEYSPAN=m
 # CONFIG_USB_SERIAL_KEYSPAN_MPR is not set
@@ -2575,20 +1993,27 @@
 CONFIG_USB_SERIAL_MCT_U232=m
 CONFIG_USB_SERIAL_MOS7720=m
 CONFIG_USB_SERIAL_MOS7840=m
+# CONFIG_USB_SERIAL_MOTOROLA is not set
 CONFIG_USB_SERIAL_NAVMAN=m
 CONFIG_USB_SERIAL_PL2303=m
 # CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QCAUX is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
 CONFIG_USB_SERIAL_HP4X=m
 CONFIG_USB_SERIAL_SAFE=m
 # CONFIG_USB_SERIAL_SAFE_PADDED is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
 CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+# CONFIG_USB_SERIAL_SYMBOL is not set
 CONFIG_USB_SERIAL_TI=m
 CONFIG_USB_SERIAL_CYBERJACK=m
 CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OPTION=m
 CONFIG_USB_SERIAL_OMNINET=m
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
 # CONFIG_USB_SERIAL_DEBUG is not set
-CONFIG_USB_EZUSB=y
 
 #
 # USB Miscellaneous drivers
@@ -2596,18 +2021,13 @@
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
 CONFIG_USB_ADUTUX=m
-CONFIG_USB_AUERSWALD=m
+# CONFIG_USB_SEVSEG is not set
 CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
-# CONFIG_USB_BERRY_CHARGE is not set
 CONFIG_USB_LED=m
 CONFIG_USB_CYPRESS_CY7C63=m
 CONFIG_USB_CYTHERM=m
-CONFIG_USB_PHIDGET=m
-CONFIG_USB_PHIDGETKIT=m
-CONFIG_USB_PHIDGETMOTORCONTROL=m
-CONFIG_USB_PHIDGETSERVO=m
 CONFIG_USB_IDMOUSE=m
 CONFIG_USB_FTDI_ELAN=m
 CONFIG_USB_APPLEDISPLAY=m
@@ -2617,86 +2037,113 @@
 CONFIG_USB_TRANCEVIBRATOR=m
 # CONFIG_USB_IOWARRIOR is not set
 CONFIG_USB_TEST=m
-
-#
-# USB DSL modem support
-#
+# CONFIG_USB_ISIGHTFW is not set
 CONFIG_USB_ATM=m
 CONFIG_USB_SPEEDTOUCH=m
 CONFIG_USB_CXACRU=m
 CONFIG_USB_UEAGLEATM=m
 CONFIG_USB_XUSBATM=m
-
-#
-# USB Gadget Support
-#
 CONFIG_USB_GADGET=m
 # CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
 CONFIG_USB_GADGET_SELECTED=y
-# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
 # CONFIG_USB_GADGET_FSL_USB2 is not set
-CONFIG_USB_GADGET_NET2280=y
-CONFIG_USB_NET2280=m
-# CONFIG_USB_GADGET_PXA2XX is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_GOKU is not set
 # CONFIG_USB_GADGET_LH7A40X is not set
 # CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
 # CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+CONFIG_USB_GADGET_NET2280=y
+CONFIG_USB_NET2280=m
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
 # CONFIG_USB_GADGET_DUMMY_HCD is not set
 CONFIG_USB_GADGET_DUALSPEED=y
 CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
 CONFIG_USB_ETH=m
 CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
 CONFIG_USB_GADGETFS=m
 CONFIG_USB_FILE_STORAGE=m
 # CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_MASS_STORAGE is not set
 CONFIG_USB_G_SERIAL=m
 CONFIG_USB_MIDI_GADGET=m
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_MULTI is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_UWB is not set
 CONFIG_MMC=m
 # CONFIG_MMC_DEBUG is not set
 # CONFIG_MMC_UNSAFE_RESUME is not set
 
 #
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
 #
 CONFIG_MMC_BLOCK=m
 CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
 
 #
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
 #
 CONFIG_MMC_SDHCI=m
+# CONFIG_MMC_SDHCI_PCI is not set
+# CONFIG_MMC_SDHCI_PLTFM is not set
 CONFIG_MMC_TIFM_SD=m
+# CONFIG_MMC_SDRICOH_CS is not set
+# CONFIG_MMC_CB710 is not set
+# CONFIG_MMC_VIA_SDMMC is not set
+# CONFIG_MEMSTICK is not set
 CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_CLASS=y
 
 #
 # LED drivers
 #
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
 
 #
 # LED Triggers
 #
-# CONFIG_LEDS_TRIGGERS is not set
-CONFIG_INFINIBAND=m
-CONFIG_INFINIBAND_USER_MAD=m
-CONFIG_INFINIBAND_USER_ACCESS=m
-CONFIG_INFINIBAND_USER_MEM=y
-CONFIG_INFINIBAND_ADDR_TRANS=y
-CONFIG_INFINIBAND_MTHCA=m
-CONFIG_INFINIBAND_MTHCA_DEBUG=y
-CONFIG_INFINIBAND_AMSO1100=m
-CONFIG_INFINIBAND_AMSO1100_DEBUG=y
-# CONFIG_MLX4_INFINIBAND is not set
-CONFIG_INFINIBAND_IPOIB=m
-# CONFIG_INFINIBAND_IPOIB_CM is not set
-CONFIG_INFINIBAND_IPOIB_DEBUG=y
-# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
-CONFIG_INFINIBAND_SRP=m
-CONFIG_INFINIBAND_ISER=m
-CONFIG_RTC_LIB=m
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_RTC_LIB=y
 CONFIG_RTC_CLASS=m
 
 #
@@ -2712,6 +2159,7 @@
 # I2C RTC drivers
 #
 CONFIG_RTC_DRV_DS1307=m
+# CONFIG_RTC_DRV_DS1374 is not set
 CONFIG_RTC_DRV_DS1672=m
 # CONFIG_RTC_DRV_MAX6900 is not set
 CONFIG_RTC_DRV_RS5C372=m
@@ -2720,48 +2168,45 @@
 CONFIG_RTC_DRV_PCF8563=m
 CONFIG_RTC_DRV_PCF8583=m
 # CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
 
 #
 # SPI RTC drivers
 #
-CONFIG_RTC_DRV_RS5C348=m
-CONFIG_RTC_DRV_MAX6902=m
 
 #
 # Platform RTC drivers
 #
 # CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
 CONFIG_RTC_DRV_DS1553=m
-# CONFIG_RTC_DRV_STK17TA8 is not set
 CONFIG_RTC_DRV_DS1742=m
+# CONFIG_RTC_DRV_STK17TA8 is not set
 CONFIG_RTC_DRV_M48T86=m
+# CONFIG_RTC_DRV_M48T35 is not set
 # CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
 CONFIG_RTC_DRV_V3020=m
 
 #
 # on-CPU RTC drivers
 #
-
-#
-# DMA Engine support
-#
-CONFIG_DMA_ENGINE=y
-
-#
-# DMA Clients
-#
-CONFIG_NET_DMA=y
-
-#
-# DMA Devices
-#
-CONFIG_INTEL_IOATDMA=m
+# CONFIG_RTC_DRV_AU1XXX is not set
+# CONFIG_DMADEVICES is not set
 # CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
 
 #
-# Userspace I/O
+# TI VLYNQ
 #
-# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
 
 #
 # File systems
@@ -2772,44 +2217,43 @@
 CONFIG_EXT2_FS_SECURITY=y
 # CONFIG_EXT2_FS_XIP is not set
 CONFIG_EXT3_FS=m
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 CONFIG_EXT3_FS_XATTR=y
 CONFIG_EXT3_FS_POSIX_ACL=y
 CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 CONFIG_JBD=m
 # CONFIG_JBD_DEBUG is not set
 CONFIG_FS_MBCACHE=m
-CONFIG_REISERFS_FS=m
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_JFS_FS=m
-CONFIG_JFS_POSIX_ACL=y
-CONFIG_JFS_SECURITY=y
-# CONFIG_JFS_DEBUG is not set
-CONFIG_JFS_STATISTICS=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
 CONFIG_FS_POSIX_ACL=y
-CONFIG_XFS_FS=m
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_SECURITY=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_XFS_RT=y
+# CONFIG_XFS_FS is not set
 # CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
-CONFIG_MINIX_FS=m
-CONFIG_ROMFS_FS=m
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 CONFIG_QUOTA=y
-CONFIG_QFMT_V1=m
-CONFIG_QFMT_V2=m
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
 CONFIG_QUOTACTL=y
-CONFIG_DNOTIFY=y
-CONFIG_AUTOFS_FS=m
-CONFIG_AUTOFS4_FS=m
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=y
 CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
 
 #
 # CD-ROM/DVD Filesystems
@@ -2838,73 +2282,79 @@
 CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
 # CONFIG_TMPFS_POSIX_ACL is not set
 # CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
 CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
-CONFIG_ADFS_FS=m
-# CONFIG_ADFS_FS_RW is not set
-CONFIG_AFFS_FS=m
-CONFIG_ECRYPT_FS=m
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-# CONFIG_BEFS_DEBUG is not set
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_JFFS2_FS=m
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
 CONFIG_JFFS2_FS_DEBUG=0
 CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
 # CONFIG_JFFS2_SUMMARY is not set
-# CONFIG_JFFS2_FS_XATTR is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_FS_POSIX_ACL=y
+CONFIG_JFFS2_FS_SECURITY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
 CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
 CONFIG_JFFS2_RTIME=y
 # CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_LOGFS is not set
 CONFIG_CRAMFS=y
-CONFIG_VXFS_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-# CONFIG_UFS_FS_WRITE is not set
-# CONFIG_UFS_DEBUG is not set
-
-#
-# Network File Systems
-#
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
 CONFIG_NFS_FS=m
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V3_ACL is not set
 CONFIG_NFS_V4=y
-CONFIG_NFS_DIRECTIO=y
+# CONFIG_NFS_V4_1 is not set
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 # CONFIG_NFSD_V3_ACL is not set
 CONFIG_NFSD_V4=y
-CONFIG_NFSD_TCP=y
 CONFIG_LOCKD=m
 CONFIG_LOCKD_V4=y
 CONFIG_EXPORTFS=m
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=m
 CONFIG_SUNRPC_GSS=m
-# CONFIG_SUNRPC_BIND34 is not set
 CONFIG_RPCSEC_GSS_KRB5=m
 CONFIG_RPCSEC_GSS_SPKM3=m
 CONFIG_SMB_FS=m
 # CONFIG_SMB_NLS_DEFAULT is not set
+# CONFIG_CEPH_FS is not set
 CONFIG_CIFS=m
 # CONFIG_CIFS_STATS is not set
 # CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_UPCALL is not set
 # CONFIG_CIFS_XATTR is not set
 # CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_DFS_UPCALL is not set
 # CONFIG_CIFS_EXPERIMENTAL is not set
 CONFIG_NCP_FS=m
 CONFIG_NCPFS_PACKET_SIGNING=y
@@ -2916,7 +2366,6 @@
 CONFIG_NCPFS_NLS=y
 CONFIG_NCPFS_EXTRAS=y
 CONFIG_CODA_FS=m
-# CONFIG_CODA_FS_OLD_API is not set
 CONFIG_AFS_FS=m
 # CONFIG_AFS_DEBUG is not set
 
@@ -2948,10 +2397,6 @@
 CONFIG_KARMA_PARTITION=y
 CONFIG_EFI_PARTITION=y
 # CONFIG_SYSV68_PARTITION is not set
-
-#
-# Native Language Support
-#
 CONFIG_NLS=y
 CONFIG_NLS_DEFAULT="cp437"
 CONFIG_NLS_CODEPAGE_437=m
@@ -2992,118 +2437,179 @@
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
 CONFIG_NLS_UTF8=m
-
-#
-# Distributed Lock Manager
-#
-CONFIG_DLM=m
-# CONFIG_DLM_DEBUG is not set
-
-#
-# Profiling support
-#
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
+# CONFIG_DLM is not set
 
 #
 # Kernel hacking
 #
 CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 # CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
 # CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
 CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
 # CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
+CONFIG_DEBUG_FS=y
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
-CONFIG_CROSSCOMPILE=y
+# CONFIG_DEBUG_MEMORY_INIT is not set
+CONFIG_RCU_CPU_STALL_DETECTOR=y
+# CONFIG_LKDTM is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_RING_BUFFER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_EARLY_PRINTK=y
 # CONFIG_CMDLINE_BOOL is not set
+# CONFIG_SPINLOCK_TEST is not set
 
 #
 # Security options
 #
 CONFIG_KEYS=y
 # CONFIG_KEYS_DEBUG_PROC_KEYS is not set
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-# CONFIG_SECURITY_NETWORK_XFRM is not set
-CONFIG_SECURITY_CAPABILITIES=m
-CONFIG_SECURITY_ROOTPLUG=m
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
-CONFIG_SECURITY_SELINUX_DISABLE=y
-CONFIG_SECURITY_SELINUX_DEVELOP=y
-CONFIG_SECURITY_SELINUX_AVC_STATS=y
-CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
-# CONFIG_SECURITY_SELINUX_ENABLE_SECMARK_DEFAULT is not set
-# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
-CONFIG_XOR_BLOCKS=m
-CONFIG_ASYNC_CORE=m
-CONFIG_ASYNC_MEMCPY=m
-CONFIG_ASYNC_XOR=m
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
 CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
 CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
 CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_BLKCIPHER2=y
 CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
 CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=m
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
 CONFIG_CRYPTO_HMAC=y
 # CONFIG_CRYPTO_XCBC is not set
-CONFIG_CRYPTO_NULL=m
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_GHASH is not set
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
 CONFIG_CRYPTO_SHA1=m
 CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_TGR192=m
-# CONFIG_CRYPTO_GF128MUL is not set
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_LRW is not set
-# CONFIG_CRYPTO_CRYPTD is not set
-CONFIG_CRYPTO_DES=m
-# CONFIG_CRYPTO_FCRYPT is not set
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
 CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+# CONFIG_CRYPTO_CAMELLIA is not set
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_FCRYPT is not set
 CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_ANUBIS=m
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
 CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_CRC32C=m
-# CONFIG_CRYPTO_CAMELLIA is not set
-CONFIG_CRYPTO_TEST=m
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_BINARY_PRINTF is not set
 
 #
 # Library routines
 #
 CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
 CONFIG_CRC_CCITT=m
 CONFIG_CRC16=m
-# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=m
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
 CONFIG_LIBCRC32C=m
 CONFIG_AUDIT_GENERIC=y
 CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=m
-CONFIG_REED_SOLOMON=m
-CONFIG_REED_SOLOMON_DEC16=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
 CONFIG_TEXTSEARCH=y
 CONFIG_TEXTSEARCH_KMP=m
 CONFIG_TEXTSEARCH_BM=m
 CONFIG_TEXTSEARCH_FSM=m
-CONFIG_PLIST=y
 CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
 CONFIG_HAS_DMA=y
-CONFIG_CHECK_SIGNATURE=y
+CONFIG_NLATTR=y
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index 57a5048..90a032a 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25
-# Mon Apr 28 12:24:17 2008
+# Linux kernel version: 2.6.34-rc6
+# Sat May  1 11:49:51 2010
 #
 CONFIG_MIPS=y
 
@@ -9,22 +9,25 @@
 # Machine selection
 #
 # CONFIG_MACH_ALCHEMY is not set
+# CONFIG_AR7 is not set
 # CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
 # CONFIG_MIPS_COBALT is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_LASAT is not set
-# CONFIG_LEMOTE_FULONG is not set
-# CONFIG_MIPS_ATLAS is not set
+# CONFIG_MACH_LOONGSON is not set
 # CONFIG_MIPS_MALTA is not set
-# CONFIG_MIPS_SEAD is not set
 # CONFIG_MIPS_SIM is not set
-# CONFIG_MARKEINS is not set
+# CONFIG_NEC_MARKEINS is not set
 # CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
 # CONFIG_PNX8550_JBS is not set
 # CONFIG_PNX8550_STB810 is not set
 # CONFIG_PMC_MSP is not set
 # CONFIG_PMC_YOSEMITE is not set
+# CONFIG_POWERTV is not set
 # CONFIG_SGI_IP22 is not set
 # CONFIG_SGI_IP27 is not set
 # CONFIG_SGI_IP28 is not set
@@ -38,11 +41,14 @@
 # CONFIG_SIBYTE_SENTOSA is not set
 # CONFIG_SIBYTE_BIGSUR is not set
 # CONFIG_SNI_RM is not set
-# CONFIG_TOSHIBA_JMR3927 is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
 CONFIG_MIKROTIK_RB532=y
-# CONFIG_TOSHIBA_RBTX4927 is not set
-# CONFIG_TOSHIBA_RBTX4938 is not set
 # CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
+CONFIG_LOONGSON_UART_BASE=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
@@ -53,14 +59,15 @@
 CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CMOS_UPDATE=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
 CONFIG_BOOT_RAW=y
+CONFIG_CEVT_R4K_LIB=y
 CONFIG_CEVT_R4K=y
+CONFIG_CSRC_R4K_LIB=y
 CONFIG_CSRC_R4K=y
 CONFIG_DMA_NONCOHERENT=y
-CONFIG_DMA_NEED_PCI_MAP_STATE=y
-# CONFIG_HOTPLUG_CPU is not set
+CONFIG_NEED_DMA_MAP_STATE=y
 # CONFIG_NO_IOPORT is not set
 CONFIG_GENERIC_GPIO=y
 # CONFIG_CPU_BIG_ENDIAN is not set
@@ -73,7 +80,8 @@
 #
 # CPU selection
 #
-# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_LOONGSON2E is not set
+# CONFIG_CPU_LOONGSON2F is not set
 CONFIG_CPU_MIPS32_R1=y
 # CONFIG_CPU_MIPS32_R2 is not set
 # CONFIG_CPU_MIPS64_R1 is not set
@@ -86,6 +94,7 @@
 # CONFIG_CPU_TX49XX is not set
 # CONFIG_CPU_R5000 is not set
 # CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
 # CONFIG_CPU_R6000 is not set
 # CONFIG_CPU_NEVADA is not set
 # CONFIG_CPU_R8000 is not set
@@ -93,11 +102,13 @@
 # CONFIG_CPU_RM7000 is not set
 # CONFIG_CPU_RM9000 is not set
 # CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
 CONFIG_SYS_HAS_CPU_MIPS32_R1=y
 CONFIG_CPU_MIPS32=y
 CONFIG_CPU_MIPSR1=y
 CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
 CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_HARDWARE_WATCHPOINTS=y
 
 #
 # Kernel type
@@ -107,11 +118,13 @@
 CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_8KB is not set
 # CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
 # CONFIG_PAGE_SIZE_64KB is not set
 CONFIG_CPU_HAS_PREFETCH=y
 CONFIG_MIPS_MT_DISABLED=y
 # CONFIG_MIPS_MT_SMP is not set
 # CONFIG_MIPS_MT_SMTC is not set
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
 CONFIG_CPU_HAS_SYNC=y
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_GENERIC_IRQ_PROBE=y
@@ -124,12 +137,13 @@
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=0
 CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
 CONFIG_TICK_ONESHOT=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -151,6 +165,7 @@
 CONFIG_LOCKDEP_SUPPORT=y
 CONFIG_STACKTRACE_SUPPORT=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
 
 #
 # General setup
@@ -168,23 +183,31 @@
 # CONFIG_BSD_PROCESS_ACCT_V3 is not set
 # CONFIG_TASKSTATS is not set
 # CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+# CONFIG_TREE_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
+CONFIG_TINY_RCU=y
+# CONFIG_TREE_RCU_TRACE is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CGROUPS is not set
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
 CONFIG_SYSFS_DEPRECATED=y
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_RELAY is not set
 # CONFIG_NAMESPACES is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
 CONFIG_EMBEDDED=y
 CONFIG_SYSCTL_SYSCALL=y
 # CONFIG_KALLSYMS is not set
@@ -192,54 +215,87 @@
 CONFIG_PRINTK=y
 CONFIG_BUG=y
 # CONFIG_ELF_CORE is not set
-CONFIG_COMPAT_BRK=y
+CONFIG_PCSPKR_PLATFORM=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
 CONFIG_SIGNALFD=y
 CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
 # CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_PCI_QUIRKS is not set
+CONFIG_COMPAT_BRK=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
 # CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
 CONFIG_HAVE_OPROFILE=y
-# CONFIG_HAVE_KPROBES is not set
-# CONFIG_HAVE_KRETPROBES is not set
-CONFIG_PROC_PAGE_MONITOR=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 # CONFIG_MODVERSIONS is not set
 # CONFIG_MODULE_SRCVERSION_ALL is not set
-# CONFIG_KMOD is not set
 CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+# CONFIG_LBDAF is not set
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
 #
 CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_AS is not set
 CONFIG_IOSCHED_DEADLINE=y
 # CONFIG_IOSCHED_CFQ is not set
-# CONFIG_DEFAULT_AS is not set
 CONFIG_DEFAULT_DEADLINE=y
 # CONFIG_DEFAULT_CFQ is not set
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="deadline"
-CONFIG_CLASSIC_RCU=y
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+# CONFIG_FREEZER is not set
 
 #
 # Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -248,7 +304,8 @@
 CONFIG_PCI=y
 CONFIG_PCI_DOMAINS=y
 # CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
 CONFIG_MMU=y
 # CONFIG_PCCARD is not set
 # CONFIG_HOTPLUG_PCI is not set
@@ -257,25 +314,22 @@
 # Executable file formats
 #
 CONFIG_BINFMT_ELF=y
+# CONFIG_HAVE_AOUT is not set
 # CONFIG_BINFMT_MISC is not set
 CONFIG_TRAD_SIGNALS=y
 
 #
 # Power management options
 #
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
 CONFIG_ARCH_SUSPEND_POSSIBLE=y
 # CONFIG_PM is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
 # Networking options
 #
 CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
 CONFIG_UNIX=y
 # CONFIG_NET_KEY is not set
 CONFIG_INET=y
@@ -325,7 +379,6 @@
 # CONFIG_DEFAULT_RENO is not set
 CONFIG_DEFAULT_TCP_CONG="vegas"
 # CONFIG_TCP_MD5SIG is not set
-# CONFIG_IP_VS is not set
 # CONFIG_IPV6 is not set
 # CONFIG_NETWORK_SECMARK is not set
 CONFIG_NETFILTER=y
@@ -336,8 +389,9 @@
 #
 # Core Netfilter Configuration
 #
+CONFIG_NETFILTER_NETLINK=m
 # CONFIG_NETFILTER_NETLINK_QUEUE is not set
-# CONFIG_NETFILTER_NETLINK_LOG is not set
+CONFIG_NETFILTER_NETLINK_LOG=m
 CONFIG_NF_CONNTRACK=y
 CONFIG_NF_CT_ACCT=y
 CONFIG_NF_CONNTRACK_MARK=y
@@ -355,18 +409,23 @@
 # CONFIG_NF_CONNTRACK_SIP is not set
 CONFIG_NF_CONNTRACK_TFTP=m
 # CONFIG_NF_CT_NETLINK is not set
+# CONFIG_NETFILTER_TPROXY is not set
 CONFIG_NETFILTER_XTABLES=y
 # CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
 # CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
 # CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+# CONFIG_NETFILTER_XT_TARGET_HL is not set
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
 # CONFIG_NETFILTER_XT_TARGET_MARK is not set
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
 # CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
 # CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 # CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
 # CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 # CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
@@ -375,18 +434,21 @@
 CONFIG_NETFILTER_XT_MATCH_DCCP=m
 # CONFIG_NETFILTER_XT_MATCH_DSCP is not set
 # CONFIG_NETFILTER_XT_MATCH_ESP is not set
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 # CONFIG_NETFILTER_XT_MATCH_HELPER is not set
+# CONFIG_NETFILTER_XT_MATCH_HL is not set
 # CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
 # CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
 CONFIG_NETFILTER_XT_MATCH_LIMIT=y
 # CONFIG_NETFILTER_XT_MATCH_MAC is not set
 # CONFIG_NETFILTER_XT_MATCH_MARK is not set
-# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
 # CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
 # CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
 # CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
 CONFIG_NETFILTER_XT_MATCH_REALM=m
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
 CONFIG_NETFILTER_XT_MATCH_SCTP=m
 CONFIG_NETFILTER_XT_MATCH_STATE=y
 # CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
@@ -394,20 +456,21 @@
 # CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
 # CONFIG_NETFILTER_XT_MATCH_TIME is not set
 CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+# CONFIG_IP_VS is not set
 
 #
 # IP: Netfilter Configuration
 #
+CONFIG_NF_DEFRAG_IPV4=y
 CONFIG_NF_CONNTRACK_IPV4=y
 CONFIG_NF_CONNTRACK_PROC_COMPAT=y
 # CONFIG_IP_NF_QUEUE is not set
 CONFIG_IP_NF_IPTABLES=y
-# CONFIG_IP_NF_MATCH_RECENT is not set
-# CONFIG_IP_NF_MATCH_ECN is not set
-# CONFIG_IP_NF_MATCH_AH is not set
-# CONFIG_IP_NF_MATCH_TTL is not set
 CONFIG_IP_NF_MATCH_ADDRTYPE=m
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
 CONFIG_IP_NF_FILTER=y
 CONFIG_IP_NF_TARGET_REJECT=y
 # CONFIG_IP_NF_TARGET_LOG is not set
@@ -415,8 +478,8 @@
 CONFIG_NF_NAT=y
 CONFIG_NF_NAT_NEEDED=y
 CONFIG_IP_NF_TARGET_MASQUERADE=y
-# CONFIG_IP_NF_TARGET_REDIRECT is not set
 # CONFIG_IP_NF_TARGET_NETMAP is not set
+# CONFIG_IP_NF_TARGET_REDIRECT is not set
 # CONFIG_NF_NAT_SNMP_BASIC is not set
 CONFIG_NF_NAT_FTP=m
 CONFIG_NF_NAT_IRC=m
@@ -426,17 +489,22 @@
 # CONFIG_NF_NAT_H323 is not set
 # CONFIG_NF_NAT_SIP is not set
 CONFIG_IP_NF_MANGLE=y
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
 # CONFIG_IP_NF_TARGET_ECN is not set
 # CONFIG_IP_NF_TARGET_TTL is not set
-# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
 CONFIG_IP_NF_RAW=m
 # CONFIG_IP_NF_ARPTABLES is not set
 # CONFIG_IP_DCCP is not set
 # CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
+CONFIG_STP=y
 CONFIG_BRIDGE=y
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+# CONFIG_NET_DSA is not set
 CONFIG_VLAN_8021Q=y
+# CONFIG_VLAN_8021Q_GVRP is not set
 # CONFIG_DECNET is not set
 CONFIG_LLC=y
 CONFIG_LLC2=m
@@ -446,6 +514,8 @@
 # CONFIG_LAPB is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
 CONFIG_NET_SCHED=y
 
 #
@@ -455,7 +525,7 @@
 # CONFIG_NET_SCH_HTB is not set
 # CONFIG_NET_SCH_HFSC is not set
 CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_RR=m
+# CONFIG_NET_SCH_MULTIQ is not set
 # CONFIG_NET_SCH_RED is not set
 # CONFIG_NET_SCH_SFQ is not set
 # CONFIG_NET_SCH_TEQL is not set
@@ -463,6 +533,7 @@
 # CONFIG_NET_SCH_GRED is not set
 # CONFIG_NET_SCH_DSMARK is not set
 CONFIG_NET_SCH_NETEM=m
+# CONFIG_NET_SCH_DRR is not set
 # CONFIG_NET_SCH_INGRESS is not set
 
 #
@@ -496,8 +567,10 @@
 # CONFIG_NET_ACT_NAT is not set
 CONFIG_NET_ACT_PEDIT=m
 # CONFIG_NET_ACT_SIMP is not set
+# CONFIG_NET_ACT_SKBEDIT is not set
 CONFIG_NET_CLS_IND=y
 CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
 
 #
 # Network testing
@@ -514,14 +587,19 @@
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
 CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_PRIV=y
+# CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
 
 #
-# Wireless
+# CFG80211 needs to be enabled for MAC80211
 #
-# CONFIG_CFG80211 is not set
-CONFIG_WIRELESS_EXT=y
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIMAX is not set
 # CONFIG_RFKILL is not set
 # CONFIG_NET_9P is not set
 
@@ -533,13 +611,17 @@
 # Generic Driver Options
 #
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
 # CONFIG_SYS_HYPERVISOR is not set
 # CONFIG_CONNECTOR is not set
 CONFIG_MTD=y
 # CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
 # CONFIG_MTD_CONCAT is not set
 CONFIG_MTD_PARTITIONS=y
 # CONFIG_MTD_REDBOOT_PARTS is not set
@@ -612,6 +694,11 @@
 # CONFIG_MTD_ONENAND is not set
 
 #
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
 # UBI - Unsorted block images
 #
 # CONFIG_MTD_UBI is not set
@@ -623,23 +710,36 @@
 # CONFIG_BLK_DEV_UMEM is not set
 # CONFIG_BLK_DEV_COW_COMMON is not set
 # CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
 # CONFIG_BLK_DEV_NBD is not set
 # CONFIG_BLK_DEV_SX8 is not set
 # CONFIG_BLK_DEV_RAM is not set
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_PHANTOM is not set
-# CONFIG_EEPROM_93CX6 is not set
 # CONFIG_SGI_IOC4 is not set
 # CONFIG_TIFM_CORE is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
 CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 #
 # SCSI device support
 #
+CONFIG_SCSI_MOD=y
 # CONFIG_RAID_ATTRS is not set
 CONFIG_SCSI=y
 CONFIG_SCSI_DMA=y
@@ -656,10 +756,6 @@
 # CONFIG_BLK_DEV_SR is not set
 # CONFIG_CHR_DEV_SG is not set
 # CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
 # CONFIG_SCSI_MULTI_LUN is not set
 # CONFIG_SCSI_CONSTANTS is not set
 # CONFIG_SCSI_LOGGING is not set
@@ -676,27 +772,35 @@
 # CONFIG_SCSI_SRP_ATTRS is not set
 CONFIG_SCSI_LOWLEVEL=y
 # CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
+# CONFIG_BE2ISCSI is not set
 # CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_HPSA is not set
 # CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
 # CONFIG_SCSI_ACARD is not set
 # CONFIG_SCSI_AACRAID is not set
 # CONFIG_SCSI_AIC7XXX is not set
 # CONFIG_SCSI_AIC7XXX_OLD is not set
 # CONFIG_SCSI_AIC79XX is not set
 # CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
 # CONFIG_SCSI_DPT_I2O is not set
 # CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_SCSI_ARCMSR is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_MPT2SAS is not set
 # CONFIG_SCSI_HPTIOP is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_FCOE is not set
 # CONFIG_SCSI_DMX3191D is not set
 # CONFIG_SCSI_FUTURE_DOMAIN is not set
 # CONFIG_SCSI_IPS is not set
 # CONFIG_SCSI_INITIO is not set
 # CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_MVSAS is not set
 # CONFIG_SCSI_STEX is not set
 # CONFIG_SCSI_SYM53C8XX_2 is not set
 # CONFIG_SCSI_IPR is not set
@@ -708,9 +812,15 @@
 # CONFIG_SCSI_DC390T is not set
 # CONFIG_SCSI_NSP32 is not set
 # CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
 # CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_BFA_FC is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
 CONFIG_ATA=y
 # CONFIG_ATA_NONSTANDARD is not set
+# CONFIG_ATA_VERBOSE_ERROR is not set
 # CONFIG_SATA_PMP is not set
 # CONFIG_SATA_AHCI is not set
 # CONFIG_SATA_SIL24 is not set
@@ -732,6 +842,7 @@
 # CONFIG_PATA_ALI is not set
 # CONFIG_PATA_AMD is not set
 # CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATP867X is not set
 # CONFIG_PATA_ATIIXP is not set
 # CONFIG_PATA_CMD640_PCI is not set
 # CONFIG_PATA_CMD64X is not set
@@ -747,6 +858,7 @@
 # CONFIG_PATA_IT821X is not set
 # CONFIG_PATA_IT8213 is not set
 # CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
 # CONFIG_PATA_TRIFLEX is not set
 # CONFIG_PATA_MARVELL is not set
 # CONFIG_PATA_MPIIX is not set
@@ -757,29 +869,39 @@
 # CONFIG_PATA_NS87415 is not set
 # CONFIG_PATA_OPTI is not set
 # CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
 # CONFIG_PATA_PDC_OLD is not set
 # CONFIG_PATA_RADISYS is not set
 CONFIG_PATA_RB532=y
+# CONFIG_PATA_RDC is not set
 # CONFIG_PATA_RZ1000 is not set
 # CONFIG_PATA_SC1200 is not set
 # CONFIG_PATA_SERVERWORKS is not set
-# CONFIG_PATA_PDC2027X is not set
 # CONFIG_PATA_SIL680 is not set
 # CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
 # CONFIG_PATA_VIA is not set
 # CONFIG_PATA_WINBOND is not set
 # CONFIG_PATA_PLATFORM is not set
+# CONFIG_PATA_SCH is not set
 # CONFIG_MD is not set
 # CONFIG_FUSION is not set
 
 #
 # IEEE 1394 (FireWire) support
 #
+
+#
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# The newer stack is recommended.
+#
 # CONFIG_FIREWIRE is not set
 # CONFIG_IEEE1394 is not set
 # CONFIG_I2O is not set
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 CONFIG_IFB=m
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
@@ -797,21 +919,28 @@
 # CONFIG_SUNGEM is not set
 # CONFIG_CASSINI is not set
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_SMC91X is not set
 # CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
 # CONFIG_NET_TULIP is not set
 # CONFIG_HP100 is not set
 # CONFIG_IBM_NEW_EMAC_ZMII is not set
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NET_PCI=y
 # CONFIG_PCNET32 is not set
 # CONFIG_AMD8111_ETH is not set
 # CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
 # CONFIG_B44 is not set
 # CONFIG_FORCEDETH is not set
 # CONFIG_TC35815 is not set
-# CONFIG_EEPRO100 is not set
 # CONFIG_E100 is not set
 # CONFIG_FEALNX is not set
 # CONFIG_NATSEMI is not set
@@ -821,30 +950,27 @@
 # CONFIG_R6040 is not set
 # CONFIG_SIS900 is not set
 # CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
 # CONFIG_SUNDANCE is not set
 # CONFIG_TLAN is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
 CONFIG_VIA_RHINE=y
 # CONFIG_VIA_RHINE_MMIO is not set
-CONFIG_VIA_RHINE_NAPI=y
 # CONFIG_SC92031 is not set
+# CONFIG_ATL2 is not set
 # CONFIG_NETDEV_1000 is not set
 # CONFIG_NETDEV_10000 is not set
 # CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-CONFIG_WLAN_80211=y
-# CONFIG_IPW2100 is not set
-# CONFIG_IPW2200 is not set
-# CONFIG_LIBERTAS is not set
-# CONFIG_HERMES is not set
+CONFIG_WLAN=y
 CONFIG_ATMEL=m
 # CONFIG_PCI_ATMEL is not set
 # CONFIG_PRISM54 is not set
-# CONFIG_IWLWIFI_LEDS is not set
 # CONFIG_HOSTAP is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
 # CONFIG_WAN is not set
 # CONFIG_FDDI is not set
 # CONFIG_HIPPI is not set
@@ -864,6 +990,7 @@
 # CONFIG_NETCONSOLE is not set
 # CONFIG_NETPOLL is not set
 # CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_VMXNET3 is not set
 # CONFIG_ISDN is not set
 # CONFIG_PHONE is not set
 
@@ -872,7 +999,8 @@
 #
 CONFIG_INPUT=y
 # CONFIG_INPUT_FF_MEMLESS is not set
-# CONFIG_INPUT_POLLDEV is not set
+CONFIG_INPUT_POLLDEV=y
+# CONFIG_INPUT_SPARSEKMAP is not set
 
 #
 # Userland interfaces
@@ -887,17 +1015,29 @@
 #
 CONFIG_INPUT_KEYBOARD=y
 # CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
 # CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
-# CONFIG_KEYBOARD_NEWTON is not set
-# CONFIG_KEYBOARD_STOWAWAY is not set
 # CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_INPUT_JOYSTICK is not set
 # CONFIG_INPUT_TABLET is not set
 # CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_PCSPKR is not set
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+# CONFIG_INPUT_UINPUT is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+CONFIG_INPUT_RB532_BUTTON=y
 
 #
 # Hardware I/O ports
@@ -909,6 +1049,7 @@
 # Character devices
 #
 # CONFIG_VT is not set
+CONFIG_DEVKMEM=y
 # CONFIG_SERIAL_NONSTANDARD is not set
 # CONFIG_NOZOMI is not set
 
@@ -928,105 +1069,95 @@
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 # CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
 CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_IPMI_HANDLER is not set
 CONFIG_HW_RANDOM=y
-# CONFIG_RTC is not set
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
 # CONFIG_RAW_DRIVER is not set
 # CONFIG_TCG_TPM is not set
 CONFIG_DEVPORT=y
 # CONFIG_I2C is not set
+# CONFIG_SPI is not set
 
 #
-# SPI support
+# PPS support
 #
-# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_SCH is not set
+
+#
+# I2C GPIO expanders:
+#
+
+#
+# PCI GPIO expanders:
+#
+# CONFIG_GPIO_CS5535 is not set
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_LANGWELL is not set
+
+#
+# SPI GPIO expanders:
+#
+
+#
+# AC97 GPIO expanders:
+#
 # CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
 # CONFIG_HWMON is not set
 # CONFIG_THERMAL is not set
 CONFIG_WATCHDOG=y
-# CONFIG_WATCHDOG_NOWAYOUT is not set
+CONFIG_WATCHDOG_NOWAYOUT=y
 
 #
 # Watchdog Device Drivers
 #
 # CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ALIM7101_WDT is not set
+CONFIG_RC32434_WDT=y
 
 #
 # PCI-based Watchdog Cards
 #
 # CONFIG_PCIPCWATCHDOG is not set
 # CONFIG_WDTPCI is not set
+CONFIG_SSB_POSSIBLE=y
 
 #
 # Sonics Silicon Backplane
 #
-CONFIG_SSB_POSSIBLE=y
 # CONFIG_SSB is not set
 
 #
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
-
-#
-# Multimedia devices
-#
-CONFIG_VIDEO_DEV=m
-CONFIG_VIDEO_V4L2_COMMON=m
-CONFIG_VIDEO_ALLOW_V4L1=y
-CONFIG_VIDEO_V4L1_COMPAT=y
-CONFIG_VIDEO_V4L2=m
-CONFIG_VIDEO_V4L1=m
-CONFIG_VIDEO_CAPTURE_DRIVERS=y
-# CONFIG_VIDEO_ADV_DEBUG is not set
-# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
-
-#
-# Encoders/decoders and other helper chips
-#
-
-#
-# Audio decoders
-#
-
-#
-# Video decoders
-#
-
-#
-# Video and audio decoders
-#
-
-#
-# MPEG video encoders
-#
-# CONFIG_VIDEO_CX2341X is not set
-
-#
-# Video encoders
-#
-
-#
-# Video improvement chips
-#
-# CONFIG_VIDEO_VIVI is not set
-# CONFIG_VIDEO_CPIA is not set
-# CONFIG_VIDEO_STRADIS is not set
-# CONFIG_SOC_CAMERA is not set
-# CONFIG_RADIO_ADAPTERS is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_DAB is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
 
 #
 # Graphics support
 #
+# CONFIG_VGA_ARB is not set
 # CONFIG_DRM is not set
 # CONFIG_VGASTATE is not set
 # CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1037,13 +1168,10 @@
 # Display device support
 #
 # CONFIG_DISPLAY_SUPPORT is not set
-
-#
-# Sound
-#
 # CONFIG_SOUND is not set
 CONFIG_HID_SUPPORT=y
 # CONFIG_HID is not set
+# CONFIG_HID_PID is not set
 CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
 CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1053,9 +1181,18 @@
 # CONFIG_USB_OTG_BLACKLIST_HUB is not set
 
 #
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# Enable Host or Gadget support to see Inventra options
+#
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
 #
 # CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_UWB is not set
 # CONFIG_MMC is not set
 # CONFIG_MEMSTICK is not set
 CONFIG_NEW_LEDS=y
@@ -1064,41 +1201,67 @@
 #
 # LED drivers
 #
+CONFIG_LEDS_MIKROTIK_RB532=y
 # CONFIG_LEDS_GPIO is not set
+# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
 
 #
 # LED Triggers
 #
-CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
 # CONFIG_INFINIBAND is not set
 CONFIG_RTC_LIB=y
 # CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
 # CONFIG_UIO is not set
 
 #
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
 # File systems
 #
 CONFIG_EXT2_FS=y
 # CONFIG_EXT2_FS_XATTR is not set
 # CONFIG_EXT2_FS_XIP is not set
 # CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 # CONFIG_FS_POSIX_ACL is not set
 # CONFIG_XFS_FS is not set
 # CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
 # CONFIG_DNOTIFY is not set
 # CONFIG_INOTIFY is not set
+CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
 # CONFIG_FUSE_FS is not set
 
 #
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
 # CD-ROM/DVD Filesystems
 #
 # CONFIG_ISO9660_FS is not set
@@ -1117,15 +1280,13 @@
 CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
 # CONFIG_TMPFS_POSIX_ACL is not set
 # CONFIG_HUGETLB_PAGE is not set
 CONFIG_CONFIGFS_FS=y
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
 # CONFIG_ADFS_FS is not set
 # CONFIG_AFFS_FS is not set
 # CONFIG_HFS_FS is not set
@@ -1148,9 +1309,14 @@
 CONFIG_JFFS2_CMODE_PRIORITY=y
 # CONFIG_JFFS2_CMODE_SIZE is not set
 # CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_LOGFS is not set
 # CONFIG_CRAMFS is not set
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
 # CONFIG_VXFS_FS is not set
 # CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
 # CONFIG_ROMFS_FS is not set
@@ -1160,6 +1326,7 @@
 # CONFIG_NFS_FS is not set
 # CONFIG_NFSD is not set
 # CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
 # CONFIG_CIFS is not set
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
@@ -1198,11 +1365,22 @@
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FRAME_WARN=1024
 # CONFIG_MAGIC_SYSRQ is not set
+CONFIG_STRIP_ASM_SYMS=y
 # CONFIG_UNUSED_SYMBOLS is not set
 # CONFIG_DEBUG_FS is not set
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
 # CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
 # CONFIG_CMDLINE_BOOL is not set
 
 #
@@ -1210,18 +1388,32 @@
 #
 # CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
 CONFIG_CRYPTO=y
 
 #
 # Crypto core or helper
 #
+# CONFIG_CRYPTO_FIPS is not set
 CONFIG_CRYPTO_ALGAPI=m
-CONFIG_CRYPTO_AEAD=m
-CONFIG_CRYPTO_BLKCIPHER=m
-# CONFIG_CRYPTO_MANAGER is not set
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=m
+CONFIG_CRYPTO_BLKCIPHER2=m
+CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_HASH2=m
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=m
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=m
+CONFIG_CRYPTO_MANAGER2=m
 # CONFIG_CRYPTO_GF128MUL is not set
 # CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=m
 # CONFIG_CRYPTO_CRYPTD is not set
 # CONFIG_CRYPTO_AUTHENC is not set
 CONFIG_CRYPTO_TEST=m
@@ -1249,14 +1441,20 @@
 #
 # CONFIG_CRYPTO_HMAC is not set
 # CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
 
 #
 # Digest
 #
-# CONFIG_CRYPTO_CRC32C is not set
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_GHASH is not set
 # CONFIG_CRYPTO_MD4 is not set
 # CONFIG_CRYPTO_MD5 is not set
 # CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
 # CONFIG_CRYPTO_SHA1 is not set
 # CONFIG_CRYPTO_SHA256 is not set
 # CONFIG_CRYPTO_SHA512 is not set
@@ -1266,7 +1464,7 @@
 #
 # Ciphers
 #
-# CONFIG_CRYPTO_AES is not set
+CONFIG_CRYPTO_AES=m
 # CONFIG_CRYPTO_ANUBIS is not set
 # CONFIG_CRYPTO_ARC4 is not set
 # CONFIG_CRYPTO_BLOWFISH is not set
@@ -1286,27 +1484,36 @@
 # Compression
 #
 # CONFIG_CRYPTO_DEFLATE is not set
+CONFIG_CRYPTO_ZLIB=y
 # CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
 # CONFIG_CRYPTO_HW is not set
+# CONFIG_BINARY_PRINTF is not set
 
 #
 # Library routines
 #
 CONFIG_BITREVERSE=y
-# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+CONFIG_GENERIC_FIND_LAST_BIT=y
 CONFIG_CRC_CCITT=m
 CONFIG_CRC16=m
+# CONFIG_CRC_T10DIF is not set
 # CONFIG_CRC_ITU_T is not set
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
 CONFIG_LIBCRC32C=m
 CONFIG_ZLIB_INFLATE=y
 CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
 CONFIG_TEXTSEARCH=y
 CONFIG_TEXTSEARCH_KMP=m
 CONFIG_TEXTSEARCH_BM=m
 CONFIG_TEXTSEARCH_FSM=m
-CONFIG_PLIST=y
 CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
 CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/mips/include/asm/kgdb.h b/arch/mips/include/asm/kgdb.h
index 48223b0..19002d6 100644
--- a/arch/mips/include/asm/kgdb.h
+++ b/arch/mips/include/asm/kgdb.h
@@ -38,6 +38,8 @@
 extern void *saved_vectors[32];
 extern void handle_exception(struct pt_regs *regs);
 extern void breakinst(void);
+extern int kgdb_ll_trap(int cmd, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig);
 
 #endif				/* __KERNEL__ */
 
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index ae07423..e76941db 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -190,8 +190,6 @@
 /* PM: arch/mips/alchemy/common/sleeper.S, power.c, irq.c */
 void au1xxx_save_and_sleep(void);
 void au_sleep(void);
-void save_au1xxx_intctl(void);
-void restore_au1xxx_intctl(void);
 
 
 /* SOC Interrupt numbers */
@@ -835,6 +833,38 @@
 #define MEM_STNAND_DATA 	0x20
 #endif
 
+
+/* Interrupt Controller register offsets */
+#define IC_CFG0RD		0x40
+#define IC_CFG0SET		0x40
+#define IC_CFG0CLR		0x44
+#define IC_CFG1RD		0x48
+#define IC_CFG1SET		0x48
+#define IC_CFG1CLR		0x4C
+#define IC_CFG2RD		0x50
+#define IC_CFG2SET		0x50
+#define IC_CFG2CLR		0x54
+#define IC_REQ0INT		0x54
+#define IC_SRCRD		0x58
+#define IC_SRCSET		0x58
+#define IC_SRCCLR		0x5C
+#define IC_REQ1INT		0x5C
+#define IC_ASSIGNRD		0x60
+#define IC_ASSIGNSET		0x60
+#define IC_ASSIGNCLR		0x64
+#define IC_WAKERD		0x68
+#define IC_WAKESET		0x68
+#define IC_WAKECLR		0x6C
+#define IC_MASKRD		0x70
+#define IC_MASKSET		0x70
+#define IC_MASKCLR		0x74
+#define IC_RISINGRD		0x78
+#define IC_RISINGCLR		0x78
+#define IC_FALLINGRD		0x7C
+#define IC_FALLINGCLR		0x7C
+#define IC_TESTBIT		0x80
+
+
 /* Interrupt Controller 0 */
 #define IC0_CFG0RD		0xB0400040
 #define IC0_CFG0SET		0xB0400040
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
index 8c6b110..c8a553a3 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
@@ -358,10 +358,6 @@
 u32 au1xxx_ddma_add_device(dbdev_tab_t *dev);
 extern void au1xxx_ddma_del_device(u32 devid);
 void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp);
-#ifdef CONFIG_PM
-void au1xxx_dbdma_suspend(void);
-void au1xxx_dbdma_resume(void);
-#endif
 
 /*
  *	Flags for the put_source/put_dest functions.
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
index 43d4da0..3999ec0 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
@@ -20,7 +20,7 @@
 	}
 }
 
-#define GPIO_DIR_OUT	0x0
-#define GPIO_DIR_IN	0x1
+#define BCM63XX_GPIO_DIR_OUT	0x0
+#define BCM63XX_GPIO_DIR_IN	0x1
 
 #endif /* !BCM63XX_GPIO_H */
diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
index 16210ce..675bd86 100644
--- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
@@ -52,6 +52,8 @@
 #define cpu_has_tx39_cache	0
 #define cpu_has_userlocal	0
 #define cpu_has_vce		0
+#define cpu_has_veic		0
+#define cpu_has_vint		0
 #define cpu_has_vtag_icache	0
 #define cpu_has_watch		1
 
diff --git a/arch/mips/include/asm/mach-loongson/gpio.h b/arch/mips/include/asm/mach-loongson/gpio.h
new file mode 100644
index 0000000..e30e73d
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/gpio.h
@@ -0,0 +1,35 @@
+/*
+ * STLS2F GPIO Support
+ *
+ * Copyright (c) 2008  Richard Liu, STMicroelectronics <richard.liu@st.com>
+ * Copyright (c) 2008-2010  Arnaud Patard <apatard@mandriva.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef	__STLS2F_GPIO_H
+#define	__STLS2F_GPIO_H
+
+#include <asm-generic/gpio.h>
+
+extern void gpio_set_value(unsigned gpio, int value);
+extern int gpio_get_value(unsigned gpio);
+extern int gpio_cansleep(unsigned gpio);
+
+/* The chip can generate interrupts, but this has not been
+ * tested and the documentation is unclear.
+ */
+static inline int gpio_to_irq(int gpio)
+{
+	return -EINVAL;
+}
+
+static inline int irq_to_gpio(int gpio)
+{
+	return -EINVAL;
+}
+
+#endif				/* __STLS2F_GPIO_H */
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index ab38791..5d33b72 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -344,16 +344,10 @@
 #ifdef CONFIG_CPU_HAS_PREFETCH
 
 #define ARCH_HAS_PREFETCH
+#define prefetch(x) __builtin_prefetch((x), 0, 1)
 
-static inline void prefetch(const void *addr)
-{
-	__asm__ __volatile__(
-	"	.set	mips4		\n"
-	"	pref	%0, (%1)	\n"
-	"	.set	mips0		\n"
-	:
-	: "i" (Pref_Load), "r" (addr));
-}
+#define ARCH_HAS_PREFETCHW
+#define prefetchw(x) __builtin_prefetch((x), 1, 1)
 
 #endif
 
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index be5bb16..3562b85 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -125,6 +125,30 @@
 
 __setup("nowait", wait_disable);
 
+static int __cpuinitdata mips_fpu_disabled;
+
+static int __init fpu_disable(char *s)
+{
+	cpu_data[0].options &= ~MIPS_CPU_FPU;
+	mips_fpu_disabled = 1;
+
+	return 1;
+}
+
+__setup("nofpu", fpu_disable);
+
+int __cpuinitdata mips_dsp_disabled;
+
+static int __init dsp_disable(char *s)
+{
+	cpu_data[0].ases &= ~MIPS_ASE_DSP;
+	mips_dsp_disabled = 1;
+
+	return 1;
+}
+
+__setup("nodsp", dsp_disable);
+
 void __init check_wait(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -982,6 +1006,12 @@
 	 */
 	BUG_ON(current_cpu_type() != c->cputype);
 
+	if (mips_fpu_disabled)
+		c->options &= ~MIPS_CPU_FPU;
+
+	if (mips_dsp_disabled)
+		c->ases &= ~MIPS_ASE_DSP;
+
 	if (c->options & MIPS_CPU_FPU) {
 		c->fpu_id = cpu_get_fpu_id();
 
diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
index 2f6a0b1..ae5db20 100644
--- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
+++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
@@ -65,7 +65,7 @@
 		return -ENODEV;
 
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
 	if (cpufreq_frequency_table_target
 	    (policy, &loongson2_clockmod_table[0], target_freq, relation,
@@ -91,7 +91,7 @@
 	/* notifiers */
 	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 
 	/* setting the cpu frequency */
 	clk_set_rate(cpuclk, freq);
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 50c9bb8..9b78ff6 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -180,6 +180,11 @@
 	*(ptr++) = regs->cp0_epc;
 }
 
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+	regs->cp0_epc = pc;
+}
+
 /*
  * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
  * then try to fall into the debugger
@@ -198,7 +203,7 @@
 	if (atomic_read(&kgdb_active) != -1)
 		kgdb_nmicallback(smp_processor_id(), regs);
 
-	if (kgdb_handle_exception(trap, compute_signal(trap), 0, regs))
+	if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs))
 		return NOTIFY_DONE;
 
 	if (atomic_read(&kgdb_setting_breakpoint))
@@ -212,6 +217,26 @@
 	return NOTIFY_STOP;
 }
 
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+int kgdb_ll_trap(int cmd, const char *str,
+		 struct pt_regs *regs, long err, int trap, int sig)
+{
+	struct die_args args = {
+		.regs	= regs,
+		.str	= str,
+		.err	= err,
+		.trapnr	= trap,
+		.signr	= sig,
+
+	};
+
+	if (!kgdb_io_module_registered)
+		return NOTIFY_DONE;
+
+	return kgdb_mips_notify(NULL, cmd, &args);
+}
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
 static struct notifier_block kgdb_notifier = {
 	.notifier_call = kgdb_mips_notify,
 };
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index cbc6182..f5981c4 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -100,10 +100,10 @@
 	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
 	    cpus_intersects(new_mask, mt_fpu_cpumask)) {
 		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
-		retval = set_cpus_allowed(p, effective_mask);
+		retval = set_cpus_allowed_ptr(p, &effective_mask);
 	} else {
 		clear_ti_thread_flag(ti, TIF_FPUBOUND);
-		retval = set_cpus_allowed(p, new_mask);
+		retval = set_cpus_allowed_ptr(p, &new_mask);
 	}
 
 out_unlock:
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index f9513f9..85aef3f 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -569,27 +569,6 @@
 	plat_smp_setup();
 }
 
-static int __init fpu_disable(char *s)
-{
-	int i;
-
-	for (i = 0; i < NR_CPUS; i++)
-		cpu_data[i].options &= ~MIPS_CPU_FPU;
-
-	return 1;
-}
-
-__setup("nofpu", fpu_disable);
-
-static int __init dsp_disable(char *s)
-{
-	cpu_data[0].ases &= ~MIPS_ASE_DSP;
-
-	return 1;
-}
-
-__setup("nodsp", dsp_disable);
-
 unsigned long kernelsp[NR_CPUS];
 unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
 
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d612c6d..8bdd6a6 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -26,6 +26,7 @@
 #include <linux/kgdb.h>
 #include <linux/kdebug.h>
 #include <linux/notifier.h>
+#include <linux/kdb.h>
 
 #include <asm/bootinfo.h>
 #include <asm/branch.h>
@@ -185,6 +186,11 @@
 			regs.regs[29] = task->thread.reg29;
 			regs.regs[31] = 0;
 			regs.cp0_epc = task->thread.reg31;
+#ifdef CONFIG_KGDB_KDB
+		} else if (atomic_read(&kgdb_active) != -1 &&
+			   kdb_current_regs) {
+			memcpy(&regs, kdb_current_regs, sizeof(regs));
+#endif /* CONFIG_KGDB_KDB */
 		} else {
 			prepare_frametrace(&regs);
 		}
@@ -360,6 +366,8 @@
 	unsigned long dvpret = dvpe();
 #endif /* CONFIG_MIPS_MT_SMTC */
 
+	notify_die(DIE_OOPS, str, (struct pt_regs *)regs, SIGSEGV, 0, 0);
+
 	console_verbose();
 	spin_lock_irq(&die_lock);
 	bust_spinlocks(1);
@@ -704,6 +712,11 @@
 	siginfo_t info;
 	char b[40];
 
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
+		return;
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
 	if (notify_die(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
 		return;
 
@@ -854,7 +867,7 @@
 				= current->cpus_allowed;
 			cpus_and(tmask, current->cpus_allowed,
 				mt_fpu_cpumask);
-			set_cpus_allowed(current, tmask);
+			set_cpus_allowed_ptr(current, &tmask);
 			set_thread_flag(TIF_FPUBOUND);
 		}
 	}
diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile
index 7668c4d..cdd2e81 100644
--- a/arch/mips/loongson/common/Makefile
+++ b/arch/mips/loongson/common/Makefile
@@ -4,6 +4,7 @@
 
 obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
     pci.o bonito-irq.o mem.o machtype.o platform.o
+obj-$(CONFIG_GENERIC_GPIO) += gpio.o
 
 #
 # Serial port support
diff --git a/arch/mips/loongson/common/gpio.c b/arch/mips/loongson/common/gpio.c
new file mode 100644
index 0000000..e8a0ffa
--- /dev/null
+++ b/arch/mips/loongson/common/gpio.c
@@ -0,0 +1,139 @@
+/*
+ *  STLS2F GPIO Support
+ *
+ *  Copyright (c) 2008 Richard Liu,  STMicroelectronics  <richard.liu@st.com>
+ *  Copyright (c) 2008-2010 Arnaud Patard <apatard@mandriva.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <asm/types.h>
+#include <loongson.h>
+#include <linux/gpio.h>
+
+#define STLS2F_N_GPIO		4
+#define STLS2F_GPIO_IN_OFFSET	16
+
+static DEFINE_SPINLOCK(gpio_lock);
+
+int gpio_get_value(unsigned gpio)
+{
+	u32 val;
+	u32 mask;
+
+	if (gpio >= STLS2F_N_GPIO)
+		return __gpio_get_value(gpio);
+
+	mask = 1 << (gpio + STLS2F_GPIO_IN_OFFSET);
+	spin_lock(&gpio_lock);
+	val = LOONGSON_GPIODATA;
+	spin_unlock(&gpio_lock);
+
+	return ((val & mask) != 0);
+}
+EXPORT_SYMBOL(gpio_get_value);
+
+void gpio_set_value(unsigned gpio, int state)
+{
+	u32 val;
+	u32 mask;
+
+	if (gpio >= STLS2F_N_GPIO) {
+		__gpio_set_value(gpio, state);
+		return;
+	}
+
+	mask = 1 << gpio;
+
+	spin_lock(&gpio_lock);
+	val = LOONGSON_GPIODATA;
+	if (state)
+		val |= mask;
+	else
+		val &= (~mask);
+	LOONGSON_GPIODATA = val;
+	spin_unlock(&gpio_lock);
+}
+EXPORT_SYMBOL(gpio_set_value);
+
+int gpio_cansleep(unsigned gpio)
+{
+	if (gpio < STLS2F_N_GPIO)
+		return 0;
+	else
+		return __gpio_cansleep(gpio);
+}
+EXPORT_SYMBOL(gpio_cansleep);
+
+static int ls2f_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+	u32 temp;
+	u32 mask;
+
+	if (gpio >= STLS2F_N_GPIO)
+		return -EINVAL;
+
+	spin_lock(&gpio_lock);
+	mask = 1 << gpio;
+	temp = LOONGSON_GPIOIE;
+	temp |= mask;
+	LOONGSON_GPIOIE = temp;
+	spin_unlock(&gpio_lock);
+
+	return 0;
+}
+
+static int ls2f_gpio_direction_output(struct gpio_chip *chip,
+		unsigned gpio, int level)
+{
+	u32 temp;
+	u32 mask;
+
+	if (gpio >= STLS2F_N_GPIO)
+		return -EINVAL;
+
+	gpio_set_value(gpio, level);
+	spin_lock(&gpio_lock);
+	mask = 1 << gpio;
+	temp = LOONGSON_GPIOIE;
+	temp &= (~mask);
+	LOONGSON_GPIOIE = temp;
+	spin_unlock(&gpio_lock);
+
+	return 0;
+}
+
+static int ls2f_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+	return gpio_get_value(gpio);
+}
+
+static void ls2f_gpio_set_value(struct gpio_chip *chip,
+		unsigned gpio, int value)
+{
+	gpio_set_value(gpio, value);
+}
+
+static struct gpio_chip ls2f_chip = {
+	.label                  = "ls2f",
+	.direction_input        = ls2f_gpio_direction_input,
+	.get                    = ls2f_gpio_get_value,
+	.direction_output       = ls2f_gpio_direction_output,
+	.set                    = ls2f_gpio_set_value,
+	.base                   = 0,
+	.ngpio                  = STLS2F_N_GPIO,
+};
+
+static int __init ls2f_gpio_setup(void)
+{
+	return gpiochip_add(&ls2f_chip);
+}
+arch_initcall(ls2f_gpio_setup);
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index f2338d1..47842b7 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -354,7 +354,8 @@
 
 			if (MIPSInst_RD(ir) == FPCREG_CSR) {
 				value = ctx->fcr31;
-				value = (value & ~0x3) | mips_rm[value & 0x3];
+				value = (value & ~FPU_CSR_RM) |
+					mips_rm[modeindex(value)];
 #ifdef CSRTRACE
 				printk("%p gpr[%d]<-csr=%08x\n",
 					(void *) (xcp->cp0_epc),
@@ -907,7 +908,7 @@
 			ieee754sp fs;
 
 			SPFROMREG(fs, MIPSInst_FS(ir));
-			ieee754_csr.rm = ieee_rm[MIPSInst_FUNC(ir) & 0x3];
+			ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
 			rv.w = ieee754sp_tint(fs);
 			ieee754_csr.rm = oldrm;
 			rfmt = w_fmt;
@@ -933,7 +934,7 @@
 			ieee754sp fs;
 
 			SPFROMREG(fs, MIPSInst_FS(ir));
-			ieee754_csr.rm = ieee_rm[MIPSInst_FUNC(ir) & 0x3];
+			ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
 			rv.l = ieee754sp_tlong(fs);
 			ieee754_csr.rm = oldrm;
 			rfmt = l_fmt;
@@ -1081,7 +1082,7 @@
 			ieee754dp fs;
 
 			DPFROMREG(fs, MIPSInst_FS(ir));
-			ieee754_csr.rm = ieee_rm[MIPSInst_FUNC(ir) & 0x3];
+			ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
 			rv.w = ieee754dp_tint(fs);
 			ieee754_csr.rm = oldrm;
 			rfmt = w_fmt;
@@ -1107,7 +1108,7 @@
 			ieee754dp fs;
 
 			DPFROMREG(fs, MIPSInst_FS(ir));
-			ieee754_csr.rm = ieee_rm[MIPSInst_FUNC(ir) & 0x3];
+			ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
 			rv.l = ieee754dp_tlong(fs);
 			ieee754_csr.rm = oldrm;
 			rfmt = l_fmt;
diff --git a/arch/mips/oprofile/op_model_loongson2.c b/arch/mips/oprofile/op_model_loongson2.c
index fa3bf66..d0d24e0 100644
--- a/arch/mips/oprofile/op_model_loongson2.c
+++ b/arch/mips/oprofile/op_model_loongson2.c
@@ -8,7 +8,6 @@
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
- *
  */
 #include <linux/init.h>
 #include <linux/oprofile.h>
@@ -17,24 +16,18 @@
 #include <loongson.h>			/* LOONGSON2_PERFCNT_IRQ */
 #include "op_impl.h"
 
-/*
- * a patch should be sent to oprofile with the loongson-specific support.
- * otherwise, the oprofile tool will not recognize this and complain about
- * "cpu_type 'unset' is not valid".
- */
 #define LOONGSON2_CPU_TYPE	"mips/loongson2"
 
-#define LOONGSON2_COUNTER1_EVENT(event)	((event & 0x0f) << 5)
-#define LOONGSON2_COUNTER2_EVENT(event)	((event & 0x0f) << 9)
-
-#define LOONGSON2_PERFCNT_EXL			(1UL	<<  0)
-#define LOONGSON2_PERFCNT_KERNEL		(1UL    <<  1)
-#define LOONGSON2_PERFCNT_SUPERVISOR	(1UL    <<  2)
-#define LOONGSON2_PERFCNT_USER			(1UL    <<  3)
-#define LOONGSON2_PERFCNT_INT_EN		(1UL    <<  4)
 #define LOONGSON2_PERFCNT_OVERFLOW		(1ULL   << 31)
 
-/* Loongson2 performance counter register */
+#define LOONGSON2_PERFCTRL_EXL			(1UL	<<  0)
+#define LOONGSON2_PERFCTRL_KERNEL		(1UL    <<  1)
+#define LOONGSON2_PERFCTRL_SUPERVISOR		(1UL    <<  2)
+#define LOONGSON2_PERFCTRL_USER			(1UL    <<  3)
+#define LOONGSON2_PERFCTRL_ENABLE		(1UL    <<  4)
+#define LOONGSON2_PERFCTRL_EVENT(idx, event) \
+	(((event) & 0x0f) << ((idx) ? 9 : 5))
+
 #define read_c0_perfctrl() __read_64bit_c0_register($24, 0)
 #define write_c0_perfctrl(val) __write_64bit_c0_register($24, 0, val)
 #define read_c0_perfcnt() __read_64bit_c0_register($25, 0)
@@ -49,7 +42,6 @@
 
 static char *oprofid = "LoongsonPerf";
 static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id);
-/* Compute all of the registers in preparation for enabling profiling.  */
 
 static void loongson2_reg_setup(struct op_counter_config *cfg)
 {
@@ -57,41 +49,38 @@
 
 	reg.reset_counter1 = 0;
 	reg.reset_counter2 = 0;
-	/* Compute the performance counter ctrl word.  */
-	/* For now count kernel and user mode */
+
+	/*
+	 * Compute the performance counter ctrl word.
+	 * For now, count kernel and user mode.
+	 */
 	if (cfg[0].enabled) {
-		ctrl |= LOONGSON2_COUNTER1_EVENT(cfg[0].event);
+		ctrl |= LOONGSON2_PERFCTRL_EVENT(0, cfg[0].event);
 		reg.reset_counter1 = 0x80000000ULL - cfg[0].count;
 	}
 
 	if (cfg[1].enabled) {
-		ctrl |= LOONGSON2_COUNTER2_EVENT(cfg[1].event);
-		reg.reset_counter2 = (0x80000000ULL - cfg[1].count);
+		ctrl |= LOONGSON2_PERFCTRL_EVENT(1, cfg[1].event);
+		reg.reset_counter2 = 0x80000000ULL - cfg[1].count;
 	}
 
 	if (cfg[0].enabled || cfg[1].enabled) {
-		ctrl |= LOONGSON2_PERFCNT_EXL | LOONGSON2_PERFCNT_INT_EN;
+		ctrl |= LOONGSON2_PERFCTRL_EXL | LOONGSON2_PERFCTRL_ENABLE;
 		if (cfg[0].kernel || cfg[1].kernel)
-			ctrl |= LOONGSON2_PERFCNT_KERNEL;
+			ctrl |= LOONGSON2_PERFCTRL_KERNEL;
 		if (cfg[0].user || cfg[1].user)
-			ctrl |= LOONGSON2_PERFCNT_USER;
+			ctrl |= LOONGSON2_PERFCTRL_USER;
 	}
 
 	reg.ctrl = ctrl;
 
 	reg.cnt1_enabled = cfg[0].enabled;
 	reg.cnt2_enabled = cfg[1].enabled;
-
 }
 
-/* Program all of the registers in preparation for enabling profiling.  */
-
 static void loongson2_cpu_setup(void *args)
 {
-	uint64_t perfcount;
-
-	perfcount = (reg.reset_counter2 << 32) | reg.reset_counter1;
-	write_c0_perfcnt(perfcount);
+	write_c0_perfcnt((reg.reset_counter2 << 32) | reg.reset_counter1);
 }
 
 static void loongson2_cpu_start(void *args)
@@ -114,15 +103,8 @@
 	struct pt_regs *regs = get_irq_regs();
 	int enabled;
 
-	/*
-	 * LOONGSON2 defines two 32-bit performance counters.
-	 * To avoid a race updating the registers we need to stop the counters
-	 * while we're messing with
-	 * them ...
-	 */
-
 	/* Check whether the irq belongs to me */
-	enabled = read_c0_perfctrl() & LOONGSON2_PERFCNT_INT_EN;
+	enabled = read_c0_perfctrl() & LOONGSON2_PERFCTRL_ENABLE;
 	if (!enabled)
 		return IRQ_NONE;
 	enabled = reg.cnt1_enabled | reg.cnt2_enabled;
diff --git a/arch/mips/powertv/asic/prealloc-calliope.c b/arch/mips/powertv/asic/prealloc-calliope.c
index cd5b76a..3fc5d466 100644
--- a/arch/mips/powertv/asic/prealloc-calliope.c
+++ b/arch/mips/powertv/asic/prealloc-calliope.c
@@ -22,7 +22,9 @@
  */
 
 #include <linux/init.h>
+#include <linux/ioport.h>
 #include <asm/mach-powertv/asic.h>
+#include "prealloc.h"
 
 /*
  * NON_DVR_CAPABLE CALLIOPE RESOURCES
@@ -32,432 +34,234 @@
 	/*
 	 * VIDEO / LX1
 	 */
-	{
-		.name   = "ST231aImage",     	/* Delta-Mu 1 image and ram */
-		.start  = 0x24000000,
-		.end    = 0x24200000 - 1,	/*2MiB */
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ST231aMonitor",   /*8KiB block ST231a monitor */
-		.start  = 0x24200000,
-		.end    = 0x24202000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "MediaMemory1",
-		.start  = 0x24202000,
-		.end    = 0x26700000 - 1, /*~36.9MiB (32MiB - (2MiB + 8KiB)) */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Delta-Mu 1 image (2MiB) */
+	PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 monitor (8KiB) */
+	PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 RAM (~36.9MiB (32MiB - (2MiB + 8KiB))) */
+	PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26700000-1,
+		IORESOURCE_MEM)
+
 	/*
 	 * Sysaudio Driver
 	 */
-	{
-		.name   = "DSP_Image_Buff",
-		.start  = 0x00000000,
-		.end    = 0x000FFFFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_CPU_PCM_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00009FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_AUX_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_Main_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* DSP code and data images (1MiB) */
+	PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC CPU PCM buffer (40KiB) */
+	PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC AUX buffer (128KiB) */
+	PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC Main buffer (128KiB) */
+	PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * STAVEM driver/STAPI
 	 */
-	{
-		.name   = "AVMEMPartition0",
-		.start  = 0x00000000,
-		.end    = 0x00600000 - 1,	/* 6 MB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 6MiB */
+	PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * DOCSIS Subsystem
 	 */
-	{
-		.name   = "Docsis",
-		.start  = 0x22000000,
-		.end    = 0x22700000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 7MiB */
+	PREALLOC_DOCSIS("Docsis", 0x27500000, 0x27c00000-1, IORESOURCE_MEM)
+
 	/*
 	 * GHW HAL Driver
 	 */
-	{
-		.name   = "GraphicsHeap",
-		.start  = 0x22700000,
-		.end    = 0x23500000 - 1,	/* 14 MB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* PowerTV Graphics Heap (14MiB) */
+	PREALLOC_NORMAL("GraphicsHeap", 0x26700000, 0x26700000+(14*1048576)-1,
+		IORESOURCE_MEM)
+
 	/*
 	 * multi com buffer area
 	 */
-	{
-		.name   = "MulticomSHM",
-		.start  = 0x23700000,
-		.end    = 0x23720000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 128KiB */
+	PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1,
+		IORESOURCE_MEM)
+
 	/*
 	 * DMA Ring buffer (don't need recording buffers)
 	 */
-	{
-		.name   = "BMM_Buffer",
-		.start  = 0x00000000,
-		.end    = 0x000AA000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 680KiB */
+	PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Display bins buffer for unit0
 	 */
-	{
-		.name   = "DisplayBins0",
-		.start  = 0x00000000,
-		.end    = 0x00000FFF,		/* 4 KB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 4KiB */
+	PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * AVFS: player HAL memory
-	 *
-	 *
 	 */
-	{
-		.name   = "AvfsDmaMem",
-		.start  = 0x00000000,
-		.end    = 0x002c4c00 - 1,	/* 945K * 3 for playback */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 945K * 3 for playback */
+	PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * PMEM
 	 */
-	{
-		.name   = "DiagPersistentMemory",
-		.start  = 0x00000000,
-		.end    = 0x10000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Persistent memory for diagnostics (64KiB) */
+	PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+	     (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Smartcard
 	 */
-	{
-		.name   = "SmartCardInfo",
-		.start  = 0x00000000,
-		.end    = 0x2800 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Read and write buffers for Internal/External cards (10KiB) */
+	PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * NAND Flash
 	 */
-	{
-		.name   = "NandFlash",
-		.start  = NAND_FLASH_BASE,
-		.end    = NAND_FLASH_BASE + 0x400 - 1,
-		.flags  = IORESOURCE_IO,
-	},
+	/* 10KiB */
+	PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+		IORESOURCE_MEM)
+
 	/*
 	 * Synopsys GMAC Memory Region
 	 */
-	{
-		.name   = "GMAC",
-		.start  = 0x00000000,
-		.end    = 0x00010000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 64KiB */
+	PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
+	/*
+	 * TFTPBuffer
+	 *
+	 *  This buffer is used in some minimal configurations (e.g. two-way
+	 *  loader) for storing software images
+	 */
+	PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Add other resources here
-	 *
 	 */
-	{ },
+
+	/*
+	 * End of Resource marker
+	 */
+	{
+		.flags  = 0,
+	},
 };
 
-struct resource non_dvr_vz_calliope_resources[] __initdata =
-{
-	/*
-	 * VIDEO / LX1
-	 */
-	{
-		.name   = "ST231aImage",	/* Delta-Mu 1 image and ram */
-		.start  = 0x24000000,
-		.end    = 0x24200000 - 1, /*2 Meg */
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ST231aMonitor",	/* 8k block ST231a monitor */
-		.start  = 0x24200000,
-		.end    = 0x24202000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "MediaMemory1",
-		.start  = 0x22202000,
-		.end    = 0x22C20B85 - 1,	/* 10.12 Meg */
-		.flags  = IORESOURCE_MEM,
-	},
-	/*
-	 * Sysaudio Driver
-	 */
-	{
-		.name   = "DSP_Image_Buff",
-		.start  = 0x00000000,
-		.end    = 0x000FFFFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_CPU_PCM_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00009FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_AUX_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_Main_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	/*
-	 * STAVEM driver/STAPI
-	 */
-	{
-		.name   = "AVMEMPartition0",
-		.start  = 0x20300000,
-		.end    = 0x20620000-1,  /*3.125 MB total */
-		.flags  = IORESOURCE_MEM,
-	},
-	/*
-	 * GHW HAL Driver
-	 */
-	{
-		.name   = "GraphicsHeap",
-		.start  = 0x20100000,
-		.end    = 0x20300000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	/*
-	 * multi com buffer area
-	 */
-	{
-		.name   = "MulticomSHM",
-		.start  = 0x23900000,
-		.end    = 0x23920000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	/*
-	 * DMA Ring buffer
-	 */
-	{
-		.name   = "BMM_Buffer",
-		.start  = 0x00000000,
-		.end    = 0x000AA000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	/*
-	 * Display bins buffer for unit0
-	 */
-	{
-		.name   = "DisplayBins0",
-		.start  = 0x00000000,
-		.end    = 0x00000FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	/*
-	 * PMEM
-	 */
-	{
-		.name   = "DiagPersistentMemory",
-		.start  = 0x00000000,
-		.end    = 0x10000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	/*
-	 * Smartcard
-	 */
-	{
-		.name   = "SmartCardInfo",
-		.start  = 0x00000000,
-		.end    = 0x2800 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	/*
-	 * NAND Flash
-	 */
-	{
-		.name   = "NandFlash",
-		.start  = NAND_FLASH_BASE,
-		.end    = NAND_FLASH_BASE+0x400 - 1,
-		.flags  = IORESOURCE_IO,
-	},
-	/*
-	 * Synopsys GMAC Memory Region
-	 */
-	{
-		.name   = "GMAC",
-		.start  = 0x00000000,
-		.end    = 0x00010000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	/*
-	 * Add other resources here
-	 */
-	{ },
-};
 
 struct resource non_dvr_vze_calliope_resources[] __initdata =
 {
 	/*
 	 * VIDEO / LX1
 	 */
-	{
-		.name   = "ST231aImage",	/* Delta-Mu 1 image and ram */
-		.start  = 0x22000000,
-		.end    = 0x22200000 - 1,	/*2  Meg */
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ST231aMonitor",	/* 8k block ST231a monitor */
-		.start  = 0x22200000,
-		.end    = 0x22202000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "MediaMemory1",
-		.start  = 0x22202000,
-		.end    = 0x22C20B85 - 1,	/* 10.12 Meg */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Delta-Mu 1 image (2MiB) */
+	PREALLOC_NORMAL("ST231aImage", 0x22000000, 0x22200000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 monitor (8KiB) */
+	PREALLOC_NORMAL("ST231aMonitor", 0x22200000, 0x22202000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 RAM (10.12MiB) */
+	PREALLOC_NORMAL("MediaMemory1", 0x22202000, 0x22C20B85-1,
+		IORESOURCE_MEM)
+
 	/*
 	 * Sysaudio Driver
 	 */
-	{
-		.name   = "DSP_Image_Buff",
-		.start  = 0x00000000,
-		.end    = 0x000FFFFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_CPU_PCM_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00009FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_AUX_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_Main_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* DSP code and data images (1MiB) */
+	PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC CPU PCM buffer (40KiB) */
+	PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC AUX buffer (16KiB) */
+	PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC Main buffer (16KiB) */
+	PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * STAVEM driver/STAPI
 	 */
-	{
-		.name   = "AVMEMPartition0",
-		.start  = 0x20396000,
-		.end    = 0x206B6000 - 1,		/* 3.125 MB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 3.125MiB */
+	PREALLOC_NORMAL("AVMEMPartition0", 0x20396000, 0x206B6000-1,
+		IORESOURCE_MEM)
+
 	/*
 	 * GHW HAL Driver
 	 */
-	{
-		.name   = "GraphicsHeap",
-		.start  = 0x20100000,
-		.end    = 0x20396000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* PowerTV Graphics Heap (2.59MiB) */
+	PREALLOC_NORMAL("GraphicsHeap", 0x20100000, 0x20396000-1,
+		IORESOURCE_MEM)
+
 	/*
 	 * multi com buffer area
 	 */
-	{
-		.name   = "MulticomSHM",
-		.start  = 0x206B6000,
-		.end    = 0x206D6000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 128KiB */
+	PREALLOC_NORMAL("MulticomSHM", 0x206B6000, 0x206D6000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 * DMA Ring buffer
+	 * DMA Ring buffer (don't need recording buffers)
 	 */
-	{
-		.name   = "BMM_Buffer",
-		.start  = 0x00000000,
-		.end    = 0x000AA000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 680KiB */
+	PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Display bins buffer for unit0
 	 */
-	{
-		.name   = "DisplayBins0",
-		.start  = 0x00000000,
-		.end    = 0x00000FFF,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 4KiB */
+	PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * PMEM
 	 */
-	{
-		.name   = "DiagPersistentMemory",
-		.start  = 0x00000000,
-		.end    = 0x10000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Persistent memory for diagnostics (64KiB) */
+	PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+	     (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Smartcard
 	 */
-	{
-		.name   = "SmartCardInfo",
-		.start  = 0x00000000,
-		.end    = 0x2800 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Read and write buffers for Internal/External cards (10KiB) */
+	PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * NAND Flash
 	 */
-	{
-		.name   = "NandFlash",
-		.start  = NAND_FLASH_BASE,
-		.end    = NAND_FLASH_BASE+0x400 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 10KiB */
+	PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+		IORESOURCE_MEM)
+
 	/*
 	 * Synopsys GMAC Memory Region
 	 */
-	{
-		.name   = "GMAC",
-		.start  = 0x00000000,
-		.end    = 0x00010000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 64KiB */
+	PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Add other resources here
 	 */
-	{ },
+
+	/*
+	 * End of Resource marker
+	 */
+	{
+		.flags  = 0,
+	},
 };
 
 struct resource non_dvr_vzf_calliope_resources[] __initdata =
@@ -465,156 +269,117 @@
 	/*
 	 * VIDEO / LX1
 	 */
-	{
-		.name   = "ST231aImage",	/*Delta-Mu 1 image and ram */
-		.start  = 0x24000000,
-		.end    = 0x24200000 - 1,	/*2MiB */
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ST231aMonitor",	/*8KiB block ST231a monitor */
-		.start  = 0x24200000,
-		.end    = 0x24202000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "MediaMemory1",
-		.start  = 0x24202000,
-		/* ~19.4 (21.5MiB - (2MiB + 8KiB)) */
-		.end    = 0x25580000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Delta-Mu 1 image (2MiB) */
+	PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 monitor (8KiB) */
+	PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 RAM (~19.4MiB (21.5MiB - (2MiB + 8KiB))) */
+	PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x25580000-1,
+		IORESOURCE_MEM)
+
 	/*
 	 * Sysaudio Driver
 	 */
-	{
-		.name   = "DSP_Image_Buff",
-		.start  = 0x00000000,
-		.end    = 0x000FFFFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_CPU_PCM_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00009FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_AUX_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_Main_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* DSP code and data images (1MiB) */
+	PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC CPU PCM buffer (40KiB) */
+	PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC AUX buffer (128KiB) */
+	PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC Main buffer (128KiB) */
+	PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * STAVEM driver/STAPI
 	 */
-	{
-		.name   = "AVMEMPartition0",
-		.start  = 0x00000000,
-		.end    = 0x00480000 - 1,  /* 4.5 MB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 4.5MiB */
+	PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00480000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * GHW HAL Driver
 	 */
-	{
-		.name   = "GraphicsHeap",
-		.start  = 0x22700000,
-		.end    = 0x23500000 - 1, /* 14 MB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* PowerTV Graphics Heap (14MiB) */
+	PREALLOC_NORMAL("GraphicsHeap", 0x25600000, 0x25600000+(14*1048576)-1,
+		IORESOURCE_MEM)
+
 	/*
 	 * multi com buffer area
 	 */
-	{
-		.name   = "MulticomSHM",
-		.start  = 0x23700000,
-		.end    = 0x23720000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 128KiB */
+	PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1,
+		IORESOURCE_MEM)
+
 	/*
 	 * DMA Ring buffer (don't need recording buffers)
 	 */
-	{
-		.name   = "BMM_Buffer",
-		.start  = 0x00000000,
-		.end    = 0x000AA000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 680KiB */
+	PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Display bins buffer for unit0
 	 */
-	{
-		.name   = "DisplayBins0",
-		.start  = 0x00000000,
-		.end    = 0x00000FFF,  /* 4 KB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 4KiB */
+	PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Display bins buffer for unit1
 	 */
-	{
-		.name   = "DisplayBins1",
-		.start  = 0x00000000,
-		.end    = 0x00000FFF,  /* 4 KB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 4KiB */
+	PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * AVFS: player HAL memory
-	 *
-	 *
 	 */
-	{
-		.name   = "AvfsDmaMem",
-		.start  = 0x00000000,
-		.end    = 0x002c4c00 - 1,  /* 945K * 3 for playback */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 945K * 3 for playback */
+	PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * PMEM
 	 */
-	{
-		.name   = "DiagPersistentMemory",
-		.start  = 0x00000000,
-		.end    = 0x10000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Persistent memory for diagnostics (64KiB) */
+	PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+	     (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Smartcard
 	 */
-	{
-		.name   = "SmartCardInfo",
-		.start  = 0x00000000,
-		.end    = 0x2800 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Read and write buffers for Internal/External cards (10KiB) */
+	PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * NAND Flash
 	 */
-	{
-		.name   = "NandFlash",
-		.start  = NAND_FLASH_BASE,
-		.end    = NAND_FLASH_BASE + 0x400 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 10KiB */
+	PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+		IORESOURCE_MEM)
+
 	/*
 	 * Synopsys GMAC Memory Region
 	 */
-	{
-		.name   = "GMAC",
-		.start  = 0x00000000,
-		.end    = 0x00010000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 64KiB */
+	PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Add other resources here
 	 */
-	{ },
+
+	/*
+	 * End of Resource marker
+	 */
+	{
+		.flags  = 0,
+	},
 };
diff --git a/arch/mips/powertv/asic/prealloc-cronus.c b/arch/mips/powertv/asic/prealloc-cronus.c
index 45a5c3e..c532b50 100644
--- a/arch/mips/powertv/asic/prealloc-cronus.c
+++ b/arch/mips/powertv/asic/prealloc-cronus.c
@@ -22,7 +22,9 @@
  */
 
 #include <linux/init.h>
+#include <linux/ioport.h>
 #include <asm/mach-powertv/asic.h>
+#include "prealloc.h"
 
 /*
  * DVR_CAPABLE CRONUS RESOURCES
@@ -30,305 +32,161 @@
 struct resource dvr_cronus_resources[] __initdata =
 {
 	/*
-	 *
 	 * VIDEO1 / LX1
-	 *
 	 */
-	{
-		.name   = "ST231aImage",	/* Delta-Mu 1 image and ram */
-		.start  = 0x24000000,
-		.end    = 0x241FFFFF,		/* 2MiB */
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ST231aMonitor",	/* 8KiB block ST231a monitor */
-		.start  = 0x24200000,
-		.end    = 0x24201FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "MediaMemory1",
-		.start  = 0x24202000,
-		.end    = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Delta-Mu 1 image (2MiB) */
+	PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 monitor (8KiB) */
+	PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+	PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26000000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * VIDEO2 / LX2
-	 *
 	 */
-	{
-		.name   = "ST231bImage",	/* Delta-Mu 2 image and ram */
-		.start  = 0x60000000,
-		.end    = 0x601FFFFF,		/* 2MiB */
-		.flags  = IORESOURCE_IO,
-	},
-	{
-		.name   = "ST231bMonitor",	/* 8KiB block ST231b monitor */
-		.start  = 0x60200000,
-		.end    = 0x60201FFF,
-		.flags  = IORESOURCE_IO,
-	},
-	{
-		.name   = "MediaMemory2",
-		.start  = 0x60202000,
-		.end    = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-		.flags  = IORESOURCE_IO,
-	},
+	/* Delta-Mu 2 image (2MiB) */
+	PREALLOC_NORMAL("ST231bImage", 0x60000000, 0x60200000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 2 monitor (8KiB) */
+	PREALLOC_NORMAL("ST231bMonitor", 0x60200000, 0x60202000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+	PREALLOC_NORMAL("MediaMemory2", 0x60202000, 0x62000000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * Sysaudio Driver
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  DSP_Image_Buff - DSP code and data images (1MB)
-	 *  ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
-	 *  ADSC_AUX_Buff - ADSC AUX buffer (16KB)
-	 *  ADSC_Main_Buff - ADSC Main buffer (16KB)
-	 *
 	 */
-	{
-		.name   = "DSP_Image_Buff",
-		.start  = 0x00000000,
-		.end    = 0x000FFFFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_CPU_PCM_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00009FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_AUX_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_Main_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* DSP code and data images (1MiB) */
+	PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC CPU PCM buffer (40KiB) */
+	PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC AUX buffer (128KiB) */
+	PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC Main buffer (128KiB) */
+	PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * STAVEM driver/STAPI
 	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
 	 *  This memory area is used for allocating buffers for Video decoding
 	 *  purposes.  Allocation/De-allocation within this buffer is managed
 	 *  by the STAVMEM driver of the STAPI.  They could be Decimated
 	 *  Picture Buffers, Intermediate Buffers, as deemed necessary for
 	 *  video decoding purposes, for any video decoders on Zeus.
-	 *
 	 */
-	{
-		.name   = "AVMEMPartition0",
-		.start  = 0x63580000,
-		.end    = 0x64180000 - 1,  /* 12 MB total */
-		.flags  = IORESOURCE_IO,
-	},
+	/* 12MiB */
+	PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * DOCSIS Subsystem
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
 	 */
-	{
-		.name   = "Docsis",
-		.start  = 0x62000000,
-		.end    = 0x62700000 - 1,	/* 7 MB total */
-		.flags  = IORESOURCE_IO,
-	},
+	/* 7MiB */
+	PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
+
 	/*
-	 *
 	 * GHW HAL Driver
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  GraphicsHeap - PowerTV Graphics Heap
-	 *
 	 */
-	{
-		.name   = "GraphicsHeap",
-		.start  = 0x62700000,
-		.end    = 0x63500000 - 1,	/* 14 MB total */
-		.flags  = IORESOURCE_IO,
-	},
+	/* PowerTV Graphics Heap (14MiB) */
+	PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * multi com buffer area
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
 	 */
-	{
-		.name   = "MulticomSHM",
-		.start  = 0x26000000,
-		.end    = 0x26020000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 128KiB */
+	PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * DMA Ring buffer
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
 	 */
-	{
-		.name   = "BMM_Buffer",
-		.start  = 0x00000000,
-		.end    = 0x00280000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x002EA000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * Display bins buffer for unit0
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Display Bins for unit0
-	 *
 	 */
-	{
-		.name   = "DisplayBins0",
-		.start  = 0x00000000,
-		.end    = 0x00000FFF,		/* 4 KB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 4KiB */
+	PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
-	 * Display bins buffer
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Display Bins for unit1
-	 *
+	 * Display bins buffer for unit1
 	 */
-	{
-		.name   = "DisplayBins1",
-		.start  = 0x64AD4000,
-		.end    = 0x64AD5000 - 1,  /* 4 KB total */
-		.flags  = IORESOURCE_IO,
-	},
+	/* 4KiB */
+	PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * ITFS
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
 	 */
-	{
-		.name   = "ITFS",
-		.start  = 0x64180000,
-		/* 815,104 bytes each for 2 ITFS partitions. */
-		.end    = 0x6430DFFF,
-		.flags  = IORESOURCE_IO,
-	},
+	/* 815,104 bytes each for 2 ITFS partitions. */
+	PREALLOC_NORMAL("ITFS", 0x00000000, 0x0018E000-1, IORESOURCE_MEM)
+
 	/*
-	 *
 	 * AVFS
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
 	 */
-	{
-		.name   = "AvfsDmaMem",
-		.start  = 0x6430E000,
-		/* (945K * 8) = (128K *3) 5 playbacks / 3 server */
-		.end    = 0x64AD0000 - 1,
-		.flags  = IORESOURCE_IO,
-	},
-	{
-		.name   = "AvfsFileSys",
-		.start  = 0x64AD0000,
-		.end    = 0x64AD1000 - 1,  /* 4K */
-		.flags  = IORESOURCE_IO,
-	},
+	/* (945K * 8) = (128K * 3) 5 playbacks / 3 server */
+	PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x007c2000-1,
+		IORESOURCE_MEM)
+
+	/* 4KiB */
+	PREALLOC_NORMAL("AvfsFileSys", 0x00000000, 0x00001000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * PMEM
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Persistent memory for diagnostics.
-	 *
 	 */
-	{
-		.name   = "DiagPersistentMemory",
-		.start  = 0x00000000,
-		.end    = 0x10000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Persistent memory for diagnostics (64KiB) */
+	PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+	     (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * Smartcard
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Read and write buffers for Internal/External cards
-	 *
 	 */
-	{
-		.name   = "SmartCardInfo",
-		.start  = 0x64AD1000,
-		.end    = 0x64AD3800 - 1,
-		.flags  = IORESOURCE_IO,
-	},
+	/* Read and write buffers for Internal/External cards (10KiB) */
+	PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * KAVNET
-	 *    NP Reset Vector - must be of the form xxCxxxxx
-	 *	   NP Image - must be video bank 1
-	 *	   NP IPC - must be video bank 2
 	 */
-	{
-		.name   = "NP_Reset_Vector",
-		.start  = 0x27c00000,
-		.end    = 0x27c01000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "NP_Image",
-		.start  = 0x27020000,
-		.end    = 0x27060000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "NP_IPC",
-		.start  = 0x63500000,
-		.end    = 0x63580000 - 1,
-		.flags  = IORESOURCE_IO,
-	},
+	/* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
+	PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
+		IORESOURCE_MEM)
+	/* NP Image - must be video bank 1 (320KiB) */
+	PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
+	/* NP IPC - must be video bank 2 (512KiB) */
+	PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
+
+	/*
+	 * TFTPBuffer
+	 *
+	 *  This buffer is used in some minimal configurations (e.g. two-way
+	 *  loader) for storing software images
+	 */
+	PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Add other resources here
 	 */
-	{ },
+
+	/*
+	 * End of Resource marker
+	 */
+	{
+		.flags  = 0,
+	},
 };
 
 /*
@@ -337,272 +195,146 @@
 struct resource non_dvr_cronus_resources[] __initdata =
 {
 	/*
-	 *
 	 * VIDEO1 / LX1
-	 *
 	 */
-	{
-		.name   = "ST231aImage",	/* Delta-Mu 1 image and ram */
-		.start  = 0x24000000,
-		.end    = 0x241FFFFF,		/* 2MiB */
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ST231aMonitor",	/* 8KiB block ST231a monitor */
-		.start  = 0x24200000,
-		.end    = 0x24201FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "MediaMemory1",
-		.start  = 0x24202000,
-		.end    = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Delta-Mu 1 image (2MiB) */
+	PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 monitor (8KiB) */
+	PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+	PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26000000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * VIDEO2 / LX2
-	 *
 	 */
-	{
-		.name   = "ST231bImage",	/* Delta-Mu 2 image and ram */
-		.start  = 0x60000000,
-		.end    = 0x601FFFFF,		/* 2MiB */
-		.flags  = IORESOURCE_IO,
-	},
-	{
-		.name   = "ST231bMonitor",	/* 8KiB block ST231b monitor */
-		.start  = 0x60200000,
-		.end    = 0x60201FFF,
-		.flags  = IORESOURCE_IO,
-	},
-	{
-		.name   = "MediaMemory2",
-		.start  = 0x60202000,
-		.end    = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-		.flags  = IORESOURCE_IO,
-	},
+	/* Delta-Mu 2 image (2MiB) */
+	PREALLOC_NORMAL("ST231bImage", 0x60000000, 0x60200000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 2 monitor (8KiB) */
+	PREALLOC_NORMAL("ST231bMonitor", 0x60200000, 0x60202000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+	PREALLOC_NORMAL("MediaMemory2", 0x60202000, 0x62000000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * Sysaudio Driver
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  DSP_Image_Buff - DSP code and data images (1MB)
-	 *  ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
-	 *  ADSC_AUX_Buff - ADSC AUX buffer (16KB)
-	 *  ADSC_Main_Buff - ADSC Main buffer (16KB)
-	 *
 	 */
-	{
-		.name   = "DSP_Image_Buff",
-		.start  = 0x00000000,
-		.end    = 0x000FFFFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_CPU_PCM_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00009FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_AUX_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_Main_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* DSP code and data images (1MiB) */
+	PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC CPU PCM buffer (40KiB) */
+	PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC AUX buffer (128KiB) */
+	PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC Main buffer (128KiB) */
+	PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * STAVEM driver/STAPI
 	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
 	 *  This memory area is used for allocating buffers for Video decoding
 	 *  purposes.  Allocation/De-allocation within this buffer is managed
 	 *  by the STAVMEM driver of the STAPI.  They could be Decimated
 	 *  Picture Buffers, Intermediate Buffers, as deemed necessary for
 	 *  video decoding purposes, for any video decoders on Zeus.
-	 *
 	 */
-	{
-		.name   = "AVMEMPartition0",
-		.start  = 0x63580000,
-		.end    = 0x64180000 - 1,  /* 12 MB total */
-		.flags  = IORESOURCE_IO,
-	},
+	/* 12MiB */
+	PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * DOCSIS Subsystem
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
 	 */
-	{
-		.name   = "Docsis",
-		.start  = 0x62000000,
-		.end    = 0x62700000 - 1,	/* 7 MB total */
-		.flags  = IORESOURCE_IO,
-	},
+	/* 7MiB */
+	PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
+
 	/*
-	 *
 	 * GHW HAL Driver
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  GraphicsHeap - PowerTV Graphics Heap
-	 *
 	 */
-	{
-		.name   = "GraphicsHeap",
-		.start  = 0x62700000,
-		.end    = 0x63500000 - 1,	/* 14 MB total */
-		.flags  = IORESOURCE_IO,
-	},
+	/* PowerTV Graphics Heap (14MiB) */
+	PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * multi com buffer area
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
 	 */
-	{
-		.name   = "MulticomSHM",
-		.start  = 0x26000000,
-		.end    = 0x26020000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 128KiB */
+	PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
-	 * DMA Ring buffer
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
+	 * DMA Ring buffer (don't need recording buffers)
 	 */
-	{
-		.name   = "BMM_Buffer",
-		.start  = 0x00000000,
-		.end    = 0x000AA000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 680KiB */
+	PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * Display bins buffer for unit0
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Display Bins for unit0
-	 *
 	 */
-	{
-		.name   = "DisplayBins0",
-		.start  = 0x00000000,
-		.end    = 0x00000FFF,		/* 4 KB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 4KiB */
+	PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
-	 * Display bins buffer
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Display Bins for unit1
-	 *
+	 * Display bins buffer for unit1
 	 */
-	{
-		.name   = "DisplayBins1",
-		.start  = 0x64AD4000,
-		.end    = 0x64AD5000 - 1,  /* 4 KB total */
-		.flags  = IORESOURCE_IO,
-	},
+	/* 4KiB */
+	PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * AVFS: player HAL memory
-	 *
-	 *
 	 */
-	{
-		.name   = "AvfsDmaMem",
-		.start  = 0x6430E000,
-		.end    = 0x645D2C00 - 1,  /* 945K * 3 for playback */
-		.flags  = IORESOURCE_IO,
-	},
+	/* 945K * 3 for playback */
+	PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1, IORESOURCE_MEM)
+
 	/*
-	 *
 	 * PMEM
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Persistent memory for diagnostics.
-	 *
 	 */
-	{
-		.name   = "DiagPersistentMemory",
-		.start  = 0x00000000,
-		.end    = 0x10000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Persistent memory for diagnostics (64KiB) */
+	PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+	     (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * Smartcard
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Read and write buffers for Internal/External cards
-	 *
 	 */
-	{
-		.name   = "SmartCardInfo",
-		.start  = 0x64AD1000,
-		.end    = 0x64AD3800 - 1,
-		.flags  = IORESOURCE_IO,
-	},
+	/* Read and write buffers for Internal/External cards (10KiB) */
+	PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, IORESOURCE_MEM)
+
 	/*
-	 *
 	 * KAVNET
-	 *    NP Reset Vector - must be of the form xxCxxxxx
-	 *	   NP Image - must be video bank 1
-	 *	   NP IPC - must be video bank 2
+	 */
+	/* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
+	PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
+		IORESOURCE_MEM)
+	/* NP Image - must be video bank 1 (320KiB) */
+	PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
+	/* NP IPC - must be video bank 2 (512KiB) */
+	PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
+
+	/*
+	 * NAND Flash
+	 */
+	/* 10KiB */
+	PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+		IORESOURCE_MEM)
+
+	/*
+	 * Add other resources here
+	 */
+
+	/*
+	 * End of Resource marker
 	 */
 	{
-		.name   = "NP_Reset_Vector",
-		.start  = 0x27c00000,
-		.end    = 0x27c01000 - 1,
-		.flags  = IORESOURCE_MEM,
+		.flags  = 0,
 	},
-	{
-		.name   = "NP_Image",
-		.start  = 0x27020000,
-		.end    = 0x27060000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "NP_IPC",
-		.start  = 0x63500000,
-		.end    = 0x63580000 - 1,
-		.flags  = IORESOURCE_IO,
-	},
-	{ },
 };
diff --git a/arch/mips/powertv/asic/prealloc-cronuslite.c b/arch/mips/powertv/asic/prealloc-cronuslite.c
index 23a90561..b5537e4 100644
--- a/arch/mips/powertv/asic/prealloc-cronuslite.c
+++ b/arch/mips/powertv/asic/prealloc-cronuslite.c
@@ -22,7 +22,9 @@
  */
 
 #include <linux/init.h>
+#include <linux/ioport.h>
 #include <asm/mach-powertv/asic.h>
+#include "prealloc.h"
 
 /*
  * NON_DVR_CAPABLE CRONUSLITE RESOURCES
@@ -30,261 +32,143 @@
 struct resource non_dvr_cronuslite_resources[] __initdata =
 {
 	/*
-	 *
 	 * VIDEO2 / LX2
-	 *
 	 */
-	{
-		.name   = "ST231aImage",	/* Delta-Mu 2 image and ram */
-		.start  = 0x60000000,
-		.end    = 0x601FFFFF,		/* 2MiB */
-		.flags  = IORESOURCE_IO,
-	},
-	{
-		.name   = "ST231aMonitor",	/* 8KiB block ST231b monitor */
-		.start  = 0x60200000,
-		.end    = 0x60201FFF,
-		.flags  = IORESOURCE_IO,
-	},
-	{
-		.name   = "MediaMemory1",
-		.start  = 0x60202000,
-		.end    = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-		.flags  = IORESOURCE_IO,
-	},
+	/* Delta-Mu 1 image (2MiB) */
+	PREALLOC_NORMAL("ST231aImage", 0x60000000, 0x60200000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 monitor (8KiB) */
+	PREALLOC_NORMAL("ST231aMonitor", 0x60200000, 0x60202000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+	PREALLOC_NORMAL("MediaMemory1", 0x60202000, 0x62000000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * Sysaudio Driver
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  DSP_Image_Buff - DSP code and data images (1MB)
-	 *  ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
-	 *  ADSC_AUX_Buff - ADSC AUX buffer (16KB)
-	 *  ADSC_Main_Buff - ADSC Main buffer (16KB)
-	 *
 	 */
-	{
-		.name   = "DSP_Image_Buff",
-		.start  = 0x00000000,
-		.end    = 0x000FFFFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_CPU_PCM_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00009FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_AUX_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_Main_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* DSP code and data images (1MiB) */
+	PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC CPU PCM buffer (40KiB) */
+	PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC AUX buffer (128KiB) */
+	PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC Main buffer (128KiB) */
+	PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * STAVEM driver/STAPI
 	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
 	 *  This memory area is used for allocating buffers for Video decoding
 	 *  purposes.  Allocation/De-allocation within this buffer is managed
 	 *  by the STAVMEM driver of the STAPI.  They could be Decimated
 	 *  Picture Buffers, Intermediate Buffers, as deemed necessary for
 	 *  video decoding purposes, for any video decoders on Zeus.
-	 *
 	 */
-	{
-		.name   = "AVMEMPartition0",
-		.start  = 0x63580000,
-		.end    = 0x63B80000 - 1,  /* 6 MB total */
-		.flags  = IORESOURCE_IO,
-	},
+	/* 6MiB */
+	PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * DOCSIS Subsystem
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
 	 */
-	{
-		.name   = "Docsis",
-		.start  = 0x62000000,
-		.end    = 0x62700000 - 1,	/* 7 MB total */
-		.flags  = IORESOURCE_IO,
-	},
+	/* 7MiB */
+	PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
+
 	/*
-	 *
 	 * GHW HAL Driver
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  GraphicsHeap - PowerTV Graphics Heap
-	 *
 	 */
-	{
-		.name   = "GraphicsHeap",
-		.start  = 0x62700000,
-		.end    = 0x63500000 - 1,	/* 14 MB total */
-		.flags  = IORESOURCE_IO,
-	},
+	/* PowerTV Graphics Heap (14MiB) */
+	PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * multi com buffer area
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
 	 */
-	{
-		.name   = "MulticomSHM",
-		.start  = 0x26000000,
-		.end    = 0x26020000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 128KiB */
+	PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
-	 * DMA Ring buffer
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
+	 * DMA Ring buffer (don't need recording buffers)
 	 */
-	{
-		.name   = "BMM_Buffer",
-		.start  = 0x00000000,
-		.end    = 0x000AA000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 680KiB */
+	PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * Display bins buffer for unit0
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Display Bins for unit0
-	 *
 	 */
-	{
-		.name   = "DisplayBins0",
-		.start  = 0x00000000,
-		.end    = 0x00000FFF,		/* 4 KB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 4KiB */
+	PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
-	 * Display bins buffer
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Display Bins for unit1
-	 *
+	 * Display bins buffer for unit1
 	 */
-	{
-		.name   = "DisplayBins1",
-		.start  = 0x63B83000,
-		.end    = 0x63B84000 - 1,  /* 4 KB total */
-		.flags  = IORESOURCE_IO,
-	},
+	/* 4KiB */
+	PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * AVFS: player HAL memory
-	 *
-	 *
 	 */
-	{
-		.name   = "AvfsDmaMem",
-		.start  = 0x63B84000,
-		.end    = 0x63E48C00 - 1,  /* 945K * 3 for playback */
-		.flags  = IORESOURCE_IO,
-	},
+	/* 945K * 3 for playback */
+	PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * PMEM
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Persistent memory for diagnostics.
-	 *
 	 */
-	{
-		.name   = "DiagPersistentMemory",
-		.start  = 0x00000000,
-		.end    = 0x10000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Persistent memory for diagnostics (64KiB) */
+	PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+	     (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * Smartcard
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Read and write buffers for Internal/External cards
-	 *
 	 */
-	{
-		.name   = "SmartCardInfo",
-		.start  = 0x63B80000,
-		.end    = 0x63B82800 - 1,
-		.flags  = IORESOURCE_IO,
-	},
+	/* Read and write buffers for Internal/External cards (10KiB) */
+	PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, IORESOURCE_MEM)
+
 	/*
-	 *
 	 * KAVNET
-	 *    NP Reset Vector - must be of the form xxCxxxxx
-	 *	   NP Image - must be video bank 1
-	 *	   NP IPC - must be video bank 2
 	 */
-	{
-		.name   = "NP_Reset_Vector",
-		.start  = 0x27c00000,
-		.end    = 0x27c01000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "NP_Image",
-		.start  = 0x27020000,
-		.end    = 0x27060000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "NP_IPC",
-		.start  = 0x63500000,
-		.end    = 0x63580000 - 1,
-		.flags  = IORESOURCE_IO,
-	},
+	/* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
+	PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
+		IORESOURCE_MEM)
+	/* NP Image - must be video bank 1 (320KiB) */
+	PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
+	/* NP IPC - must be video bank 2 (512KiB) */
+	PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
+
 	/*
 	 * NAND Flash
 	 */
-	{
-		.name   = "NandFlash",
-		.start  = NAND_FLASH_BASE,
-		.end    = NAND_FLASH_BASE + 0x400 - 1,
-		.flags  = IORESOURCE_IO,
-	},
+	/* 10KiB */
+	PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+		IORESOURCE_MEM)
+
+	/*
+	 * TFTPBuffer
+	 *
+	 *  This buffer is used in some minimal configurations (e.g. two-way
+	 *  loader) for storing software images
+	 */
+	PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Add other resources here
 	 */
-	{ },
+
+	/*
+	 * End of Resource marker
+	 */
+	{
+		.flags  = 0,
+	},
 };
diff --git a/arch/mips/powertv/asic/prealloc-zeus.c b/arch/mips/powertv/asic/prealloc-zeus.c
index 018d451..96480a2 100644
--- a/arch/mips/powertv/asic/prealloc-zeus.c
+++ b/arch/mips/powertv/asic/prealloc-zeus.c
@@ -22,7 +22,9 @@
  */
 
 #include <linux/init.h>
+#include <linux/ioport.h>
 #include <asm/mach-powertv/asic.h>
+#include "prealloc.h"
 
 /*
  * DVR_CAPABLE RESOURCES
@@ -30,280 +32,151 @@
 struct resource dvr_zeus_resources[] __initdata =
 {
 	/*
-	 *
 	 * VIDEO1 / LX1
-	 *
 	 */
-	{
-		.name   = "ST231aImage",	/* Delta-Mu 1 image and ram */
-		.start  = 0x20000000,
-		.end    = 0x201FFFFF,		/* 2MiB */
-		.flags  = IORESOURCE_IO,
-	},
-	{
-		.name   = "ST231aMonitor",	/* 8KiB block ST231a monitor */
-		.start  = 0x20200000,
-		.end    = 0x20201FFF,
-		.flags  = IORESOURCE_IO,
-	},
-	{
-		.name   = "MediaMemory1",
-		.start  = 0x20202000,
-		.end    = 0x21FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-		.flags  = IORESOURCE_IO,
-	},
+	/* Delta-Mu 1 image (2MiB) */
+	PREALLOC_NORMAL("ST231aImage", 0x20000000, 0x20200000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 monitor (8KiB) */
+	PREALLOC_NORMAL("ST231aMonitor", 0x20200000, 0x20202000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+	PREALLOC_NORMAL("MediaMemory1", 0x20202000, 0x22000000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * VIDEO2 / LX2
-	 *
 	 */
-	{
-		.name   = "ST231bImage",	/* Delta-Mu 2 image and ram */
-		.start  = 0x30000000,
-		.end    = 0x301FFFFF,		/* 2MiB */
-		.flags  = IORESOURCE_IO,
-	},
-	{
-		.name   = "ST231bMonitor",	/* 8KiB block ST231b monitor */
-		.start  = 0x30200000,
-		.end    = 0x30201FFF,
-		.flags  = IORESOURCE_IO,
-	},
-	{
-		.name   = "MediaMemory2",
-		.start  = 0x30202000,
-		.end    = 0x31FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-		.flags  = IORESOURCE_IO,
-	},
+	/* Delta-Mu 2 image (2MiB) */
+	PREALLOC_NORMAL("ST231bImage", 0x30000000, 0x30200000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 2 monitor (8KiB) */
+	PREALLOC_NORMAL("ST231bMonitor", 0x30200000, 0x30202000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+	PREALLOC_NORMAL("MediaMemory2", 0x30202000, 0x32000000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * Sysaudio Driver
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  DSP_Image_Buff - DSP code and data images (1MB)
-	 *  ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
-	 *  ADSC_AUX_Buff - ADSC AUX buffer (16KB)
-	 *  ADSC_Main_Buff - ADSC Main buffer (16KB)
-	 *
 	 */
-	{
-		.name   = "DSP_Image_Buff",
-		.start  = 0x00000000,
-		.end    = 0x000FFFFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_CPU_PCM_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00009FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_AUX_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_Main_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* DSP code and data images (1MiB) */
+	PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC CPU PCM buffer (40KiB) */
+	PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC AUX buffer (16KiB) */
+	PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC Main buffer (16KiB) */
+	PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * STAVEM driver/STAPI
 	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
 	 *  This memory area is used for allocating buffers for Video decoding
 	 *  purposes.  Allocation/De-allocation within this buffer is managed
 	 *  by the STAVMEM driver of the STAPI.  They could be Decimated
 	 *  Picture Buffers, Intermediate Buffers, as deemed necessary for
 	 *  video decoding purposes, for any video decoders on Zeus.
-	 *
 	 */
-	{
-		.name   = "AVMEMPartition0",
-		.start  = 0x00000000,
-		.end    = 0x00c00000 - 1,	/* 12 MB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 12MiB */
+	PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * DOCSIS Subsystem
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
 	 */
-	{
-		.name   = "Docsis",
-		.start  = 0x40100000,
-		.end    = 0x407fffff,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 7MiB */
+	PREALLOC_DOCSIS("Docsis", 0x40100000, 0x40800000-1, IORESOURCE_MEM)
+
 	/*
-	 *
 	 * GHW HAL Driver
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  GraphicsHeap - PowerTV Graphics Heap
-	 *
 	 */
-	{
-		.name   = "GraphicsHeap",
-		.start  = 0x46900000,
-		.end    = 0x47700000 - 1,	/* 14 MB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* PowerTV Graphics Heap (14MiB) */
+	PREALLOC_NORMAL("GraphicsHeap", 0x46900000, 0x47700000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * multi com buffer area
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
 	 */
-	{
-		.name   = "MulticomSHM",
-		.start  = 0x47900000,
-		.end    = 0x47920000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 128KiB */
+	PREALLOC_NORMAL("MulticomSHM", 0x47900000, 0x47920000-1,
+		IORESOURCE_MEM)
+
 	/*
-	 *
 	 * DMA Ring buffer
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
 	 */
-	{
-		.name   = "BMM_Buffer",
-		.start  = 0x00000000,
-		.end    = 0x00280000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 2.5MiB */
+	PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x00280000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * Display bins buffer for unit0
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Display Bins for unit0
-	 *
 	 */
-	{
-		.name   = "DisplayBins0",
-		.start  = 0x00000000,
-		.end    = 0x00000FFF,	/* 4 KB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 4KiB */
+	PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
-	 * Display bins buffer
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Display Bins for unit1
-	 *
+	 * Display bins buffer for unit1
 	 */
-	{
-		.name   = "DisplayBins1",
-		.start  = 0x00000000,
-		.end    = 0x00000FFF,	/* 4 KB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 4KiB */
+	PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * ITFS
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
 	 */
-	{
-		.name   = "ITFS",
-		.start  = 0x00000000,
-		/* 815,104 bytes each for 2 ITFS partitions. */
-		.end    = 0x0018DFFF,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 815,104 bytes each for 2 ITFS partitions. */
+	PREALLOC_NORMAL("ITFS", 0x00000000, 0x0018E000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * AVFS
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Docsis -
-	 *
 	 */
-	{
-		.name   = "AvfsDmaMem",
-		.start  = 0x00000000,
-		/* (945K * 8) = (128K * 3) 5 playbacks / 3 server */
-		.end    = 0x007c2000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "AvfsFileSys",
-		.start  = 0x00000000,
-		.end    = 0x00001000 - 1,  /* 4K */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* (945K * 8) = (128K * 3) 5 playbacks / 3 server */
+	PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x007c2000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* 4KiB */
+	PREALLOC_NORMAL("AvfsFileSys", 0x00000000, 0x00001000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * PMEM
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Persistent memory for diagnostics.
-	 *
 	 */
-	{
-		.name   = "DiagPersistentMemory",
-		.start  = 0x00000000,
-		.end    = 0x10000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Persistent memory for diagnostics (64KiB) */
+	PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+	     (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * Smartcard
-	 *
-	 * This driver requires:
-	 *
-	 * Arbitrary Based Buffers:
-	 *  Read and write buffers for Internal/External cards
-	 *
 	 */
-	{
-		.name   = "SmartCardInfo",
-		.start  = 0x00000000,
-		.end    = 0x2800 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Read and write buffers for Internal/External cards (10KiB) */
+	PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
+	/*
+	 * TFTPBuffer
+	 *
+	 *  This buffer is used in some minimal configurations (e.g. two-way
+	 *  loader) for storing software images
+	 */
+	PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Add other resources here
 	 */
-	{ },
+
+	/*
+	 * End of Resource marker
+	 */
+	{
+		.flags  = 0,
+	},
 };
 
 /*
@@ -314,146 +187,118 @@
 	/*
 	 * VIDEO1 / LX1
 	 */
-	{
-		.name   = "ST231aImage",	/* Delta-Mu 1 image and ram */
-		.start  = 0x20000000,
-		.end    = 0x201FFFFF,		/* 2MiB */
-		.flags  = IORESOURCE_IO,
-	},
-	{
-		.name   = "ST231aMonitor",	/* 8KiB block ST231a monitor */
-		.start  = 0x20200000,
-		.end    = 0x20201FFF,
-		.flags  = IORESOURCE_IO,
-	},
-	{
-		.name   = "MediaMemory1",
-		.start  = 0x20202000,
-		.end    = 0x21FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-		.flags  = IORESOURCE_IO,
-	},
+	/* Delta-Mu 1 image (2MiB) */
+	PREALLOC_NORMAL("ST231aImage", 0x20000000, 0x20200000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 monitor (8KiB) */
+	PREALLOC_NORMAL("ST231aMonitor", 0x20200000, 0x20202000-1,
+		IORESOURCE_MEM)
+	/* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+	PREALLOC_NORMAL("MediaMemory1", 0x20202000, 0x22000000-1,
+		IORESOURCE_MEM)
+
 	/*
 	 * Sysaudio Driver
 	 */
-	{
-		.name   = "DSP_Image_Buff",
-		.start  = 0x00000000,
-		.end    = 0x000FFFFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_CPU_PCM_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00009FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_AUX_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "ADSC_Main_Buff",
-		.start  = 0x00000000,
-		.end    = 0x00003FFF,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* DSP code and data images (1MiB) */
+	PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC CPU PCM buffer (40KiB) */
+	PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC AUX buffer (16KiB) */
+	PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+	/* ADSC Main buffer (16KiB) */
+	PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * STAVEM driver/STAPI
 	 */
-	{
-		.name   = "AVMEMPartition0",
-		.start  = 0x00000000,
-		.end    = 0x00600000 - 1,	/* 6 MB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 6MiB */
+	PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * DOCSIS Subsystem
 	 */
-	{
-		.name   = "Docsis",
-		.start  = 0x40100000,
-		.end    = 0x407fffff,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 7MiB */
+	PREALLOC_DOCSIS("Docsis", 0x40100000, 0x40800000-1, IORESOURCE_MEM)
+
 	/*
 	 * GHW HAL Driver
 	 */
-	{
-		.name   = "GraphicsHeap",
-		.start  = 0x46900000,
-		.end    = 0x47700000 - 1,	/* 14 MB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* PowerTV Graphics Heap (14MiB) */
+	PREALLOC_NORMAL("GraphicsHeap", 0x46900000, 0x47700000-1,
+		IORESOURCE_MEM)
+
 	/*
 	 * multi com buffer area
 	 */
-	{
-		.name   = "MulticomSHM",
-		.start  = 0x47900000,
-		.end    = 0x47920000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 128KiB */
+	PREALLOC_NORMAL("MulticomSHM", 0x47900000, 0x47920000-1,
+		IORESOURCE_MEM)
+
 	/*
 	 * DMA Ring buffer
 	 */
-	{
-		.name   = "BMM_Buffer",
-		.start  = 0x00000000,
-		.end    = 0x00280000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 2.5MiB */
+	PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x00280000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Display bins buffer for unit0
 	 */
-	{
-		.name   = "DisplayBins0",
-		.start  = 0x00000000,
-		.end    = 0x00000FFF,		/* 4 KB total */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 4KiB */
+	PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
-	 *
 	 * AVFS: player HAL memory
-	 *
-	 *
 	 */
-	{
-		.name   = "AvfsDmaMem",
-		.start  = 0x00000000,
-		.end    = 0x002c4c00 - 1,	/* 945K * 3 for playback */
-		.flags  = IORESOURCE_MEM,
-	},
+	/* 945K * 3 for playback */
+	PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * PMEM
 	 */
-	{
-		.name   = "DiagPersistentMemory",
-		.start  = 0x00000000,
-		.end    = 0x10000 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Persistent memory for diagnostics (64KiB) */
+	PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+	     (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Smartcard
 	 */
-	{
-		.name   = "SmartCardInfo",
-		.start  = 0x00000000,
-		.end    = 0x2800 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+	/* Read and write buffers for Internal/External cards (10KiB) */
+	PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * NAND Flash
 	 */
-	{
-		.name   = "NandFlash",
-		.start  = NAND_FLASH_BASE,
-		.end    = NAND_FLASH_BASE + 0x400 - 1,
-		.flags  = IORESOURCE_IO,
-	},
+	/* 10KiB */
+	PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+		IORESOURCE_MEM)
+
+	/*
+	 * TFTPBuffer
+	 *
+	 *  This buffer is used in some minimal configurations (e.g. two-way
+	 *  loader) for storing software images
+	 */
+	PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
+		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
 	/*
 	 * Add other resources here
 	 */
-	{ },
+
+	/*
+	 * End of Resource marker
+	 */
+	{
+		.flags  = 0,
+	},
 };
diff --git a/arch/mips/powertv/asic/prealloc.h b/arch/mips/powertv/asic/prealloc.h
new file mode 100644
index 0000000..8e682df
--- /dev/null
+++ b/arch/mips/powertv/asic/prealloc.h
@@ -0,0 +1,70 @@
+/*
+ * Definitions for memory preallocations
+ *
+ * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _ARCH_MIPS_POWERTV_ASIC_PREALLOC_H
+#define _ARCH_MIPS_POWERTV_ASIC_PREALLOC_H
+
+#define KIBIBYTE(n) ((n) * 1024)    /* Number of kibibytes */
+#define MEBIBYTE(n) ((n) * KIBIBYTE(1024)) /* Number of mebibytes */
+
+/* "struct resource" array element definition */
+#define PREALLOC(NAME, START, END, FLAGS) {	\
+		.name = (NAME),			\
+		.start = (START),		\
+		.end = (END),			\
+		.flags = (FLAGS)		\
+	},
+
+/* Individual resources in the preallocated resource arrays are defined using
+ *  macros.  These macros are conditionally defined based on their
+ *  corresponding kernel configuration flag:
+ *    - CONFIG_PREALLOC_NORMAL: preallocate resources for a normal settop box
+ *    - CONFIG_PREALLOC_TFTP: preallocate the TFTP download resource
+ *    - CONFIG_PREALLOC_DOCSIS: preallocate the DOCSIS resource
+ *    - CONFIG_PREALLOC_PMEM: reserve space for persistent memory
+ */
+#ifdef CONFIG_PREALLOC_NORMAL
+#define PREALLOC_NORMAL(name, start, end, flags) \
+   PREALLOC(name, start, end, flags)
+#else
+#define PREALLOC_NORMAL(name, start, end, flags)
+#endif
+
+#ifdef CONFIG_PREALLOC_TFTP
+#define PREALLOC_TFTP(name, start, end, flags) \
+   PREALLOC(name, start, end, flags)
+#else
+#define PREALLOC_TFTP(name, start, end, flags)
+#endif
+
+#ifdef CONFIG_PREALLOC_DOCSIS
+#define PREALLOC_DOCSIS(name, start, end, flags) \
+   PREALLOC(name, start, end, flags)
+#else
+#define PREALLOC_DOCSIS(name, start, end, flags)
+#endif
+
+#ifdef CONFIG_PREALLOC_PMEM
+#define PREALLOC_PMEM(name, start, end, flags) \
+   PREALLOC(name, start, end, flags)
+#else
+#define PREALLOC_PMEM(name, start, end, flags)
+#endif
+#endif
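
For reference, a minimal sketch of how the conditional PREALLOC_* helpers above are consumed (not part of the merge: the array name example_resources is made up, the two entries are copied from the Cronus tables earlier in this series, and the PowerTV-specific IORESOURCE_PTV_RES_LOEXT flag is left out so the sketch only depends on generic headers):

#include <linux/init.h>
#include <linux/ioport.h>
#include "prealloc.h"

static struct resource example_resources[] __initdata = {
	/* Emitted only when CONFIG_PREALLOC_NORMAL=y; the macro expands to a
	 * full { .name, .start, .end, .flags } initializer plus trailing comma */
	PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
		IORESOURCE_MEM)

	/* Emitted only when CONFIG_PREALLOC_TFTP=y; MEBIBYTE(80) is 80*1024*1024 */
	PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
		IORESOURCE_MEM)

	/* End of Resource marker, as in the tables above */
	{
		.flags	= 0,
	},
};

With a given CONFIG_PREALLOC_* option disabled, the corresponding macro expands to nothing, so that entry simply drops out of the array while the terminating zero-flags element still marks the end for the code that walks it.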
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index adc6929..575d219b 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -905,7 +905,7 @@
 	void __iomem *base;
 };
 
-static ssize_t txx9_sram_read(struct kobject *kobj,
+static ssize_t txx9_sram_read(struct file *filp, struct kobject *kobj,
 			      struct bin_attribute *bin_attr,
 			      char *buf, loff_t pos, size_t size)
 {
@@ -920,7 +920,7 @@
 	return size;
 }
 
-static ssize_t txx9_sram_write(struct kobject *kobj,
+static ssize_t txx9_sram_write(struct file *filp, struct kobject *kobj,
 			       struct bin_attribute *bin_attr,
 			       char *buf, loff_t pos, size_t size)
 {
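
The two txx9_sram hunks above track the sysfs binary-attribute API change in this window, where bin_attribute read/write callbacks gained the opening struct file * as their first argument. A minimal sketch of a read callback using the new prototype (the attribute name "example" and its backing buffer are made up for illustration; registration stays the same, via sysfs_create_bin_file()):

#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/string.h>

static char example_buf[64];

static ssize_t example_read(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr,
			    char *buf, loff_t pos, size_t size)
{
	/* clamp the request to the backing buffer, as txx9_sram_read() does */
	if (pos >= sizeof(example_buf))
		return 0;
	if (size > sizeof(example_buf) - pos)
		size = sizeof(example_buf) - pos;
	memcpy(buf, example_buf + pos, size);
	return size;
}

static struct bin_attribute example_attr = {
	.attr	= { .name = "example", .mode = S_IRUGO },
	.size	= sizeof(example_buf),
	.read	= example_read,
};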
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2e19500..c4c4549 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -140,6 +140,7 @@
 	select HAVE_SYSCALL_WRAPPERS if PPC64
 	select GENERIC_ATOMIC64 if PPC32
 	select HAVE_PERF_EVENTS
+	select HAVE_REGS_AND_STACK_ACCESS_API
 
 config EARLY_PRINTK
 	bool
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 5cdd7ed..53696da 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -44,6 +44,18 @@
 
 	  This option will slow down process creation somewhat.
 
+config DEBUG_PER_CPU_MAPS
+	bool "Debug access to per_cpu maps"
+	depends on DEBUG_KERNEL
+	depends on SMP
+	default n
+	---help---
+	  Say Y to verify that the per_cpu map being accessed has
+	  been setup.  Adds a fair amount of code to kernel memory
+	  and decreases performance.
+
+	  Say N if unsure.
+
 config HCALL_STATS
 	bool "Hypervisor call instrumentation"
 	depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index bb2465b..ad0df7d 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -44,6 +44,7 @@
 $(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
 $(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
 $(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
+$(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405
 $(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405
 
 
@@ -77,7 +78,7 @@
 		cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \
 		virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c \
 		cuboot-acadia.c cuboot-amigaone.c cuboot-kilauea.c \
-		gamecube-head.S gamecube.c wii-head.S wii.c
+		gamecube-head.S gamecube.c wii-head.S wii.c treeboot-iss4xx.c
 src-boot := $(src-wlib) $(src-plat) empty.c
 
 src-boot := $(addprefix $(obj)/, $(src-boot))
@@ -169,7 +170,7 @@
 		$(if $3, -s $3)$(if $4, -d $4)$(if $5, -i $5) vmlinux
 
 image-$(CONFIG_PPC_PSERIES)		+= zImage.pseries
-image-$(CONFIG_PPC_MAPLE)		+= zImage.pseries
+image-$(CONFIG_PPC_MAPLE)		+= zImage.maple
 image-$(CONFIG_PPC_IBM_CELL_BLADE)	+= zImage.pseries
 image-$(CONFIG_PPC_PS3)			+= dtbImage.ps3
 image-$(CONFIG_PPC_CELLEB)		+= zImage.pseries
@@ -206,6 +207,8 @@
 image-$(CONFIG_KATMAI)			+= cuImage.katmai
 image-$(CONFIG_WARP)			+= cuImage.warp
 image-$(CONFIG_YOSEMITE)		+= cuImage.yosemite
+image-$(CONFIG_ISS4xx)			+= treeImage.iss4xx \
+					   treeImage.iss4xx-mpic
 
 # Board ports in arch/powerpc/platform/8xx/Kconfig
 image-$(CONFIG_MPC86XADS)		+= cuImage.mpc866ads
@@ -351,7 +354,7 @@
 clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
 	zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \
 	zImage.iseries zImage.miboot zImage.pmac zImage.pseries \
-	simpleImage.* otheros.bld *.dtb
+	zImage.maple simpleImage.* otheros.bld *.dtb
 
 # clean up files cached by wrapper
 clean-kernel := vmlinux.strip vmlinux.bin
diff --git a/arch/powerpc/boot/dts/iss4xx-mpic.dts b/arch/powerpc/boot/dts/iss4xx-mpic.dts
new file mode 100644
index 0000000..23e9d9b
--- /dev/null
+++ b/arch/powerpc/boot/dts/iss4xx-mpic.dts
@@ -0,0 +1,155 @@
+/*
+ * Device Tree Source for IBM Embedded PPC 476 Platform
+ *
+ * Copyright 2010 Torez Smith, IBM Corporation.
+ *
+ * Based on earlier code:
+ *     Copyright (c) 2006, 2007 IBM Corp.
+ *     Josh Boyer <jwboyer@linux.vnet.ibm.com>, David Gibson <dwg@au1.ibm.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+/memreserve/ 0x01f00000 0x00100000;
+
+/ {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	model = "ibm,iss-4xx";
+	compatible = "ibm,iss-4xx";
+	dcr-parent = <&{/cpus/cpu@0}>;
+
+	aliases {
+		serial0 = &UART0;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			model = "PowerPC,4xx"; // real CPU changed in sim
+			reg = <0>;
+			clock-frequency = <100000000>; // 100Mhz :-)
+			timebase-frequency = <100000000>;
+			i-cache-line-size = <32>;
+			d-cache-line-size = <32>;
+			i-cache-size = <32768>;
+			d-cache-size = <32768>;
+			dcr-controller;
+			dcr-access-method = "native";
+			status = "ok";
+		};
+		cpu@1 {
+			device_type = "cpu";
+			model = "PowerPC,4xx"; // real CPU changed in sim
+			reg = <1>;
+			clock-frequency = <100000000>; // 100Mhz :-)
+			timebase-frequency = <100000000>;
+			i-cache-line-size = <32>;
+			d-cache-line-size = <32>;
+			i-cache-size = <32768>;
+			d-cache-size = <32768>;
+			dcr-controller;
+			dcr-access-method = "native";
+			status = "disabled";
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0x01f00100>;
+		};
+		cpu@2 {
+			device_type = "cpu";
+			model = "PowerPC,4xx"; // real CPU changed in sim
+			reg = <2>;
+			clock-frequency = <100000000>; // 100Mhz :-)
+			timebase-frequency = <100000000>;
+			i-cache-line-size = <32>;
+			d-cache-line-size = <32>;
+			i-cache-size = <32768>;
+			d-cache-size = <32768>;
+			dcr-controller;
+			dcr-access-method = "native";
+			status = "disabled";
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0x01f00200>;
+		};
+		cpu@3 {
+			device_type = "cpu";
+			model = "PowerPC,4xx"; // real CPU changed in sim
+			reg = <3>;
+			clock-frequency = <100000000>; // 100Mhz :-)
+			timebase-frequency = <100000000>;
+			i-cache-line-size = <32>;
+			d-cache-line-size = <32>;
+			i-cache-size = <32768>;
+			d-cache-size = <32768>;
+			dcr-controller;
+			dcr-access-method = "native";
+			status = "disabled";
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0x01f00300>;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg =  <0x00000000 0x00000000 0x00000000>; // Filled in by zImage
+
+	};
+
+	MPIC: interrupt-controller {
+		compatible = "chrp,open-pic";
+		interrupt-controller;
+		dcr-reg = <0xffc00000 0x00030000>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+
+	};
+
+	plb {
+		compatible = "ibm,plb-4xx", "ibm,plb4"; /* Could be PLB6, doesn't matter */
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges;
+		clock-frequency = <0>; // Filled in by zImage
+
+		POB0: opb {
+			compatible = "ibm,opb-4xx", "ibm,opb";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			/* Wish there was a nicer way of specifying a full 32-bit
+			   range */
+			ranges = <0x00000000 0x00000001 0x00000000 0x80000000
+				  0x80000000 0x00000001 0x80000000 0x80000000>;
+			clock-frequency = <0>; // Filled in by zImage
+			UART0: serial@40000200 {
+				device_type = "serial";
+				compatible = "ns16550a";
+				reg = <0x40000200 0x00000008>;
+				virtual-reg = <0xe0000200>;
+				clock-frequency = <11059200>;
+				current-speed = <115200>;
+				interrupt-parent = <&MPIC>;
+				interrupts = <0x0 0x2>;
+			};
+		};
+	};
+
+	nvrtc {
+		compatible = "ds1743-nvram", "ds1743", "rtc-ds1743";
+		reg = <0 0xEF703000 0x2000>;
+	};
+	iss-block {
+		compatible = "ibm,iss-sim-block-device";
+		reg = <0 0xEF701000 0x1000>;
+	};
+
+	chosen {
+		linux,stdout-path = "/plb/opb/serial@40000200";
+	};
+};
diff --git a/arch/powerpc/boot/dts/iss4xx.dts b/arch/powerpc/boot/dts/iss4xx.dts
new file mode 100644
index 0000000..4ff6555
--- /dev/null
+++ b/arch/powerpc/boot/dts/iss4xx.dts
@@ -0,0 +1,116 @@
+/*
+ * Device Tree Source for IBM Embedded PPC 476 Platform
+ *
+ * Copyright 2010 Torez Smith, IBM Corporation.
+ *
+ * Based on earlier code:
+ *    Copyright (c) 2006, 2007 IBM Corp.
+ *    Josh Boyer <jwboyer@linux.vnet.ibm.com>, David Gibson <dwg@au1.ibm.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+/ {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	model = "ibm,iss-4xx";
+	compatible = "ibm,iss-4xx";
+	dcr-parent = <&{/cpus/cpu@0}>;
+
+	aliases {
+		serial0 = &UART0;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			model = "PowerPC,4xx"; // real CPU changed in sim
+			reg = <0x00000000>;
+			clock-frequency = <100000000>; // 100Mhz :-)
+			timebase-frequency = <100000000>;
+			i-cache-line-size = <32>; // may need fixup in sim
+			d-cache-line-size = <32>; // may need fixup in sim
+			i-cache-size = <32768>; /* may need fixup in sim */
+			d-cache-size = <32768>; /* may need fixup in sim */
+			dcr-controller;
+			dcr-access-method = "native";
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x00000000 0x00000000>; // Filled in by zImage
+	};
+
+	UIC0: interrupt-controller0 {
+		compatible = "ibm,uic-4xx", "ibm,uic";
+		interrupt-controller;
+		cell-index = <0>;
+		dcr-reg = <0x0c0 0x009>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+
+	};
+
+	UIC1: interrupt-controller1 {
+		compatible = "ibm,uic-4xx", "ibm,uic";
+		interrupt-controller;
+		cell-index = <1>;
+		dcr-reg = <0x0d0 0x009>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+		interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */
+		interrupt-parent = <&UIC0>;
+	};
+
+	plb {
+		compatible = "ibm,plb-4xx", "ibm,plb4"; /* Could be PLB6, doesn't matter */
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges;
+		clock-frequency = <0>; // Filled in by zImage
+
+		POB0: opb {
+			compatible = "ibm,opb-4xx", "ibm,opb";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			/* Wish there was a nicer way of specifying a full 32-bit
+			   range */
+			ranges = <0x00000000 0x00000001 0x00000000 0x80000000
+				  0x80000000 0x00000001 0x80000000 0x80000000>;
+			clock-frequency = <0>; // Filled in by zImage
+			UART0: serial@40000200 {
+				device_type = "serial";
+				compatible = "ns16550a";
+				reg = <0x40000200 0x00000008>;
+				virtual-reg = <0xe0000200>;
+				clock-frequency = <11059200>;
+				current-speed = <115200>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0x0 0x4>;
+			};
+		};
+	};
+
+	nvrtc {
+		compatible = "ds1743-nvram", "ds1743", "rtc-ds1743";
+		reg = <0 0xEF703000 0x2000>;
+	};
+	iss-block {
+		compatible = "ibm,iss-sim-block-device";
+		reg = <0 0xEF701000 0x1000>;
+	};
+
+	chosen {
+		linux,stdout-path = "/plb/opb/serial@40000200";
+	};
+};
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts
index 8a3a4f3..4dd08c3 100644
--- a/arch/powerpc/boot/dts/mpc8315erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8315erdb.dts
@@ -292,7 +292,7 @@
 			fsl,num-channels = <4>;
 			fsl,channel-fifo-len = <24>;
 			fsl,exec-units-mask = <0x97c>;
-			fsl,descriptor-types-mask = <0x3ab0abf>;
+			fsl,descriptor-types-mask = <0x3a30abf>;
 		};
 
 		sata@18000 {
@@ -463,4 +463,18 @@
 				  0 0x00800000>;
 		};
 	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		pwr {
+			gpios = <&mcu_pio 0 0>;
+			default-state = "on";
+		};
+
+		hdd {
+			gpios = <&mcu_pio 1 0>;
+			linux,default-trigger = "ide-disk";
+		};
+	};
 };
diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts
index 9e2264b..dbc1b98 100644
--- a/arch/powerpc/boot/dts/mpc8377_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts
@@ -486,4 +486,18 @@
 				  0 0x00800000>;
 		};
 	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		pwr {
+			gpios = <&mcu_pio 0 0>;
+			default-state = "on";
+		};
+
+		hdd {
+			gpios = <&mcu_pio 1 0>;
+			linux,default-trigger = "ide-disk";
+		};
+	};
 };
diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts
index 4e6a1a4..3447eb9 100644
--- a/arch/powerpc/boot/dts/mpc8378_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts
@@ -470,4 +470,18 @@
 				  0 0x00800000>;
 		};
 	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		pwr {
+			gpios = <&mcu_pio 0 0>;
+			default-state = "on";
+		};
+
+		hdd {
+			gpios = <&mcu_pio 1 0>;
+			linux,default-trigger = "ide-disk";
+		};
+	};
 };
diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts
index 72336d5..15560c6 100644
--- a/arch/powerpc/boot/dts/mpc8379_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts
@@ -436,4 +436,18 @@
 		compatible = "fsl,mpc8349-pci";
 		device_type = "pci";
 	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		pwr {
+			gpios = <&mcu_pio 0 0>;
+			default-state = "on";
+		};
+
+		hdd {
+			gpios = <&mcu_pio 1 0>;
+			linux,default-trigger = "ide-disk";
+		};
+	};
 };
diff --git a/arch/powerpc/boot/dts/p1020rdb.dts b/arch/powerpc/boot/dts/p1020rdb.dts
index df52690..22f64b6 100644
--- a/arch/powerpc/boot/dts/p1020rdb.dts
+++ b/arch/powerpc/boot/dts/p1020rdb.dts
@@ -19,6 +19,9 @@
 	aliases {
 		serial0 = &serial0;
 		serial1 = &serial1;
+		ethernet0 = &enet0;
+		ethernet1 = &enet1;
+		ethernet2 = &enet2;
 		pci0 = &pci0;
 		pci1 = &pci1;
 	};
@@ -346,6 +349,122 @@
 			};
 		};
 
+		mdio@24000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "fsl,etsec2-mdio";
+			reg = <0x24000 0x1000 0xb0030 0x4>;
+
+			phy0: ethernet-phy@0 {
+				interrupt-parent = <&mpic>;
+				interrupts = <3 1>;
+				reg = <0x0>;
+			};
+
+			phy1: ethernet-phy@1 {
+				interrupt-parent = <&mpic>;
+				interrupts = <2 1>;
+				reg = <0x1>;
+			};
+		};
+
+		mdio@25000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "fsl,etsec2-tbi";
+			reg = <0x25000 0x1000 0xb1030 0x4>;
+
+			tbi0: tbi-phy@11 {
+				reg = <0x11>;
+				device_type = "tbi-phy";
+			};
+		};
+
+		enet0: ethernet@b0000 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			device_type = "network";
+			model = "eTSEC";
+			compatible = "fsl,etsec2";
+			fsl,num_rx_queues = <0x8>;
+			fsl,num_tx_queues = <0x8>;
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupt-parent = <&mpic>;
+			fixed-link = <1 1 1000 0 0>;
+			phy-connection-type = "rgmii-id";
+
+			queue-group@0 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0xb0000 0x1000>;
+				interrupts = <29 2 30 2 34 2>;
+			};
+
+			queue-group@1 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0xb4000 0x1000>;
+				interrupts = <17 2 18 2 24 2>;
+			};
+		};
+
+		enet1: ethernet@b1000 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			device_type = "network";
+			model = "eTSEC";
+			compatible = "fsl,etsec2";
+			fsl,num_rx_queues = <0x8>;
+			fsl,num_tx_queues = <0x8>;
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupt-parent = <&mpic>;
+			phy-handle = <&phy0>;
+			tbi-handle = <&tbi0>;
+			phy-connection-type = "sgmii";
+
+			queue-group@0 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0xb1000 0x1000>;
+				interrupts = <35 2 36 2 40 2>;
+			};
+
+			queue-group@1 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0xb5000 0x1000>;
+				interrupts = <51 2 52 2 67 2>;
+			};
+		};
+
+		enet2: ethernet@b2000 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			device_type = "network";
+			model = "eTSEC";
+			compatible = "fsl,etsec2";
+			fsl,num_rx_queues = <0x8>;
+			fsl,num_tx_queues = <0x8>;
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupt-parent = <&mpic>;
+			phy-handle = <&phy1>;
+			phy-connection-type = "rgmii-id";
+
+			queue-group@0 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0xb2000 0x1000>;
+				interrupts = <31 2 32 2 33 2>;
+			};
+
+			queue-group@1 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0xb6000 0x1000>;
+				interrupts = <25 2 26 2 27 2>;
+			};
+		};
+
 		usb@22000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -356,6 +475,11 @@
 			phy_type = "ulpi";
 		};
 
+		/* USB2 is shared with localbus, so it must be disabled
+		   by default. We can't put 'status = "disabled";' here
+		   since U-Boot doesn't clear the status property when
+		   it enables USB2. OTOH, U-Boot does create a new node
+		   when there isn't any. So, just comment it out.
 		usb@23000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -365,6 +489,7 @@
 			interrupts = <46 0x2>;
 			phy_type = "ulpi";
 		};
+		*/
 
 		sdhci@2e000 {
 			compatible = "fsl,p1020-esdhc", "fsl,esdhc";
diff --git a/arch/powerpc/boot/treeboot-iss4xx.c b/arch/powerpc/boot/treeboot-iss4xx.c
new file mode 100644
index 0000000..fcc4495
--- /dev/null
+++ b/arch/powerpc/boot/treeboot-iss4xx.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2010 Ben. Herrenschmidt, IBM Corporation.
+ *
+ * Based on earlier code:
+ *   Copyright (C) Paul Mackerras 1997.
+ *
+ *   Matt Porter <mporter@kernel.crashing.org>
+ *   Copyright 2002-2005 MontaVista Software Inc.
+ *
+ *   Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *   Copyright (c) 2003, 2004 Zultys Technologies
+ *
+ *    Copyright 2007 David Gibson, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "elf.h"
+#include "string.h"
+#include "stdio.h"
+#include "page.h"
+#include "ops.h"
+#include "reg.h"
+#include "io.h"
+#include "dcr.h"
+#include "4xx.h"
+#include "44x.h"
+#include "libfdt.h"
+
+BSS_STACK(4096);
+
+static void iss_4xx_fixups(void)
+{
+	ibm4xx_sdram_fixup_memsize();
+}
+
+#define SPRN_PIR	0x11E	/* Processor Identification Register */
+void platform_init(void)
+{
+	unsigned long end_of_ram = 0x08000000;
+	unsigned long avail_ram = end_of_ram - (unsigned long)_end;
+	u32 pir_reg;
+
+	simple_alloc_init(_end, avail_ram, 128, 64);
+	platform_ops.fixups = iss_4xx_fixups;
+	platform_ops.exit = ibm44x_dbcr_reset;
+	pir_reg = mfspr(SPRN_PIR);
+	fdt_set_boot_cpuid_phys(_dtb_start, pir_reg);
+	fdt_init(_dtb_start);
+	serial_console_init();
+}
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index f4594ed..cb97e75 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -149,6 +149,10 @@
     platformo=$object/of.o
     link_address='0x4000000'
     ;;
+maple)
+    platformo=$object/of.o
+    link_address='0x400000'
+    ;;
 pmac|chrp)
     platformo=$object/of.o
     ;;
@@ -237,6 +241,9 @@
     link_address='0x600000'
     platformo="$object/$platform-head.o $object/$platform.o"
     ;;
+treeboot-iss4xx-mpic)
+    platformo="$object/treeboot-iss4xx.o"
+    ;;
 esac
 
 vmz="$tmpdir/`basename \"$kernel\"`.$ext"
@@ -321,7 +328,7 @@
 
 # post-processing needed for some platforms
 case "$platform" in
-pseries|chrp)
+pseries|chrp|maple)
     $objbin/addnote "$ofile"
     ;;
 coff)
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
new file mode 100644
index 0000000..8683cbc
--- /dev/null
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -0,0 +1,1026 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.33
+# Thu Mar  4 11:50:12 2010
+#
+# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+# CONFIG_PPC_BOOK3S_32 is not set
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_8xx is not set
+# CONFIG_40x is not set
+CONFIG_44x=y
+# CONFIG_E200 is not set
+CONFIG_PPC_FPU=y
+CONFIG_4xx=y
+CONFIG_BOOKE=y
+CONFIG_PTE_64BIT=y
+CONFIG_PHYS_64BIT=y
+CONFIG_PPC_MMU_NOHASH=y
+CONFIG_PPC_MMU_NOHASH_32=y
+# CONFIG_PPC_MM_SLICES is not set
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+# CONFIG_NOT_COHERENT_CACHE is not set
+CONFIG_PPC32=y
+CONFIG_WORD_SIZE=32
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_MMU=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
+# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set
+CONFIG_IRQ_PER_CPU=y
+CONFIG_NR_IRQS=512
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_OF=y
+CONFIG_PPC_UDBG_16550=y
+CONFIG_GENERIC_TBSYNC=y
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DTC=y
+# CONFIG_DEFAULT_UIMAGE is not set
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_PPC_DCR_NATIVE=y
+# CONFIG_PPC_DCR_MMIO is not set
+CONFIG_PPC_DCR=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=y
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+# CONFIG_FREEZER is not set
+
+#
+# Platform support
+#
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PQ2ADS is not set
+CONFIG_PPC_47x=y
+# CONFIG_BAMBOO is not set
+# CONFIG_EBONY is not set
+# CONFIG_SAM440EP is not set
+# CONFIG_SEQUOIA is not set
+# CONFIG_TAISHAN is not set
+# CONFIG_KATMAI is not set
+# CONFIG_RAINIER is not set
+# CONFIG_WARP is not set
+# CONFIG_ARCHES is not set
+# CONFIG_CANYONLANDS is not set
+# CONFIG_GLACIER is not set
+# CONFIG_REDWOOD is not set
+# CONFIG_EIGER is not set
+# CONFIG_YOSEMITE is not set
+CONFIG_ISS4xx=y
+# CONFIG_XILINX_VIRTEX440_GENERIC_BOARD is not set
+# CONFIG_PPC44x_SIMPLE is not set
+# CONFIG_PPC4xx_GPIO is not set
+# CONFIG_IPIC is not set
+CONFIG_MPIC=y
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_FSL_ULI1575 is not set
+CONFIG_OF_RTC=y
+# CONFIG_SIMPLE_GPIO is not set
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_MATH_EMULATION=y
+# CONFIG_IOMMU_HELPER is not set
+# CONFIG_SWIOTLB is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_HAS_WALK_MEMORY=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_IRQ_ALL_CPUS=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_MAX_ACTIVE_REGIONS=32
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_MIGRATION=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_STDBINUTILS=y
+CONFIG_PPC_4K_PAGES=y
+# CONFIG_PPC_16K_PAGES is not set
+# CONFIG_PPC_64K_PAGES is not set
+# CONFIG_PPC_256K_PAGES is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_PROC_DEVICETREE=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="root=/dev/issblk0"
+CONFIG_EXTRA_TARGETS=""
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+CONFIG_4xx_SOC=y
+CONFIG_PPC_PCI_CHOICE=y
+# CONFIG_PCI is not set
+# CONFIG_PCI_DOMAINS is not set
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+# CONFIG_HAS_RAPIDIO is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_PAGE_OFFSET=0xc0000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_PHYSICAL_START=0x00000000
+CONFIG_TASK_SIZE=0xc0000000
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+CONFIG_MTD_OF_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+CONFIG_MTD_PHYSMAP_OF=y
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+CONFIG_OF_DEVICE=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=35000
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_XILINX_SYSACE is not set
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+# CONFIG_NETDEVICES is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+# CONFIG_SERIAL_8250_MANY_PORTS is not set
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+# CONFIG_SERIAL_8250_RSA is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL is not set
+# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_HVC_UDBG is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_EDAC is not set
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_NLS is not set
+# CONFIG_DLM is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_PPC_DISABLE_WERROR is not set
+CONFIG_PPC_WERROR=y
+CONFIG_PRINT_STACK_DEPTH=64
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_PPC_EMULATED_STATS is not set
+# CONFIG_CODE_PATCHING_SELFTEST is not set
+# CONFIG_FTR_FIXUP_SELFTEST is not set
+# CONFIG_MSI_BITMAP_SELFTEST is not set
+# CONFIG_XMON is not set
+# CONFIG_IRQSTACKS is not set
+# CONFIG_VIRQ_DEBUG is not set
+# CONFIG_BDI_SWITCH is not set
+CONFIG_PPC_EARLY_DEBUG=y
+# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
+# CONFIG_PPC_EARLY_DEBUG_G5 is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE is not set
+# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
+# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+# CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE is not set
+# CONFIG_PPC_EARLY_DEBUG_BEAT is not set
+CONFIG_PPC_EARLY_DEBUG_44x=y
+# CONFIG_PPC_EARLY_DEBUG_40x is not set
+# CONFIG_PPC_EARLY_DEBUG_CPM is not set
+# CONFIG_PPC_EARLY_DEBUG_USBGECKO is not set
+CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW=0x40000200
+CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH=0x1
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=y
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_PPC_CLOCK is not set
+# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 12980d5..dad617e 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -988,7 +988,7 @@
 CONFIG_ACENIC_OMIT_TIGON_I=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
-CONFIG_E1000E=m
+CONFIG_E1000E=y
 # CONFIG_IP1000 is not set
 # CONFIG_IGB is not set
 # CONFIG_NS83820 is not set
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 41de3dd..16a1458 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -804,7 +804,7 @@
 CONFIG_ACENIC_OMIT_TIGON_I=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
-CONFIG_E1000E=m
+CONFIG_E1000E=y
 # CONFIG_IP1000 is not set
 # CONFIG_IGB is not set
 # CONFIG_NS83820 is not set
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 81de6eb..725634f 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -12,8 +12,12 @@
 #define L1_CACHE_SHIFT		6
 #define MAX_COPY_PREFETCH	4
 #elif defined(CONFIG_PPC32)
-#define L1_CACHE_SHIFT		5
 #define MAX_COPY_PREFETCH	4
+#if defined(CONFIG_PPC_47x)
+#define L1_CACHE_SHIFT		7
+#else
+#define L1_CACHE_SHIFT		5
+#endif
 #else /* CONFIG_PPC64 */
 #define L1_CACHE_SHIFT		7
 #endif
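
The hunk above only changes the shift; the line size in bytes follows from it. A minimal sketch of the arithmetic, assuming the usual generic definition of L1_CACHE_BYTES (that definition is an assumption here, not part of this hunk):

	/* Assumed generic form: the byte size is derived from the shift. */
	#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
	/* CONFIG_PPC_47x:  1 << 7 = 128-byte L1 lines, matching 64-bit parts.
	 * other PPC32:     1 << 5 =  32-byte L1 lines, as before. */
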
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index abb833b..e3cba4e 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -72,6 +72,7 @@
 extern int machine_check_440A(struct pt_regs *regs);
 extern int machine_check_e500(struct pt_regs *regs);
 extern int machine_check_e200(struct pt_regs *regs);
+extern int machine_check_47x(struct pt_regs *regs);
 
 /* NOTE WELL: Update identify_cpu() if fields are added or removed! */
 struct cpu_spec {
@@ -365,6 +366,7 @@
 #define CPU_FTRS_44X	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
 #define CPU_FTRS_440x6	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
 	    CPU_FTR_INDEXED_DCR)
+#define CPU_FTRS_47X	(CPU_FTRS_440x6)
 #define CPU_FTRS_E200	(CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
 	    CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
 	    CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
@@ -453,6 +455,9 @@
 #ifdef CONFIG_44x
 	    CPU_FTRS_44X | CPU_FTRS_440x6 |
 #endif
+#ifdef CONFIG_PPC_47x
+	    CPU_FTRS_47X |
+#endif
 #ifdef CONFIG_E200
 	    CPU_FTRS_E200 |
 #endif
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index f027581..5119b7d 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -228,6 +228,7 @@
 #define H_JOIN			0x298
 #define H_VASI_STATE            0x2A4
 #define H_ENABLE_CRQ		0x2B0
+#define H_GET_EM_PARMS		0x2B8
 #define H_SET_MPP		0x2D0
 #define H_GET_MPP		0x2D4
 #define MAX_HCALL_OPCODE	H_GET_MPP
@@ -281,6 +282,7 @@
  */
 #define PLPAR_HCALL9_BUFSIZE 9
 long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
+long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);
 
 /* For hcall instrumentation.  One structure per-hcall, per-CPU */
 struct hcall_stats {
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 7e06b43..a6ca6da 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -31,6 +31,10 @@
 #define KEXEC_ARCH KEXEC_ARCH_PPC
 #endif
 
+#define KEXEC_STATE_NONE 0
+#define KEXEC_STATE_IRQS_OFF 1
+#define KEXEC_STATE_REAL_MODE 2
+
 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
 #include <asm/reg.h>
diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
index 9163695..bca8fdc 100644
--- a/arch/powerpc/include/asm/kmap_types.h
+++ b/arch/powerpc/include/asm/kmap_types.h
@@ -26,6 +26,7 @@
 	KM_SOFTIRQ1,
 	KM_PPC_SYNC_PAGE,
 	KM_PPC_SYNC_ICACHE,
+	KM_KDB,
 	KM_TYPE_NR
 };
 
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index 0372669..bf52d70 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -40,7 +40,7 @@
 #define PPC44x_TLB_I		0x00000400      /* Caching is inhibited */
 #define PPC44x_TLB_M		0x00000200      /* Memory is coherent */
 #define PPC44x_TLB_G		0x00000100      /* Memory is guarded */
-#define PPC44x_TLB_E		0x00000080      /* Memory is guarded */
+#define PPC44x_TLB_E		0x00000080      /* Memory is little endian */
 
 #define PPC44x_TLB_PERM_MASK	0x0000003f
 #define PPC44x_TLB_UX		0x00000020      /* User execution */
@@ -53,6 +53,52 @@
 /* Number of TLB entries */
 #define PPC44x_TLB_SIZE		64
 
+/* 47x bits */
+#define PPC47x_MMUCR_TID	0x0000ffff
+#define PPC47x_MMUCR_STS	0x00010000
+
+/* Page identification fields */
+#define PPC47x_TLB0_EPN_MASK	0xfffff000      /* Effective Page Number */
+#define PPC47x_TLB0_VALID	0x00000800      /* Valid flag */
+#define PPC47x_TLB0_TS		0x00000400	/* Translation address space */
+#define PPC47x_TLB0_4K		0x00000000
+#define PPC47x_TLB0_16K		0x00000010
+#define PPC47x_TLB0_64K		0x00000030
+#define PPC47x_TLB0_1M		0x00000070
+#define PPC47x_TLB0_16M		0x000000f0
+#define PPC47x_TLB0_256M	0x000001f0
+#define PPC47x_TLB0_1G		0x000003f0
+#define PPC47x_TLB0_BOLTED_R	0x00000008	/* tlbre only */
+
+/* Translation fields */
+#define PPC47x_TLB1_RPN_MASK	0xfffff000      /* Real Page Number */
+#define PPC47x_TLB1_ERPN_MASK	0x000003ff
+
+/* Storage attribute and access control fields */
+#define PPC47x_TLB2_ATTR_MASK	0x0003ff80
+#define PPC47x_TLB2_IL1I	0x00020000      /* Inhibit L1 instruction cache */
+#define PPC47x_TLB2_IL1D	0x00010000      /* Inhibit L1 data cache */
+#define PPC47x_TLB2_U0		0x00008000      /* User 0 */
+#define PPC47x_TLB2_U1		0x00004000      /* User 1 */
+#define PPC47x_TLB2_U2		0x00002000      /* User 2 */
+#define PPC47x_TLB2_U3		0x00001000      /* User 3 */
+#define PPC47x_TLB2_W		0x00000800      /* Caching is write-through */
+#define PPC47x_TLB2_I		0x00000400      /* Caching is inhibited */
+#define PPC47x_TLB2_M		0x00000200      /* Memory is coherent */
+#define PPC47x_TLB2_G		0x00000100      /* Memory is guarded */
+#define PPC47x_TLB2_E		0x00000080      /* Memory is little endian */
+#define PPC47x_TLB2_PERM_MASK	0x0000003f
+#define PPC47x_TLB2_UX		0x00000020      /* User execution */
+#define PPC47x_TLB2_UW		0x00000010      /* User write */
+#define PPC47x_TLB2_UR		0x00000008      /* User read */
+#define PPC47x_TLB2_SX		0x00000004      /* Super execution */
+#define PPC47x_TLB2_SW		0x00000002      /* Super write */
+#define PPC47x_TLB2_SR		0x00000001      /* Super read */
+#define PPC47x_TLB2_U_RWX	(PPC47x_TLB2_UX|PPC47x_TLB2_UW|PPC47x_TLB2_UR)
+#define PPC47x_TLB2_S_RWX	(PPC47x_TLB2_SX|PPC47x_TLB2_SW|PPC47x_TLB2_SR)
+#define PPC47x_TLB2_S_RW	(PPC47x_TLB2_SW | PPC47x_TLB2_SR)
+#define PPC47x_TLB2_IMG		(PPC47x_TLB2_I | PPC47x_TLB2_M | PPC47x_TLB2_G)
+
 #ifndef __ASSEMBLY__
 
 extern unsigned int tlb_44x_hwater;
@@ -79,12 +125,15 @@
 
 #if (PAGE_SHIFT == 12)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
+#define PPC47x_TLBE_SIZE	PPC47x_TLB0_4K
 #define mmu_virtual_psize	MMU_PAGE_4K
 #elif (PAGE_SHIFT == 14)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_16K
+#define PPC47x_TLBE_SIZE	PPC47x_TLB0_16K
 #define mmu_virtual_psize	MMU_PAGE_16K
 #elif (PAGE_SHIFT == 16)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_64K
+#define PPC47x_TLBE_SIZE	PPC47x_TLB0_64K
 #define mmu_virtual_psize	MMU_PAGE_64K
 #elif (PAGE_SHIFT == 18)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_256K
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 7ffbb65..7ebf42e 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -18,6 +18,7 @@
 #define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
 #define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
 #define MMU_FTR_TYPE_3E			ASM_CONST(0x00000020)
+#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000040)
 
 /*
  * This is individual features
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index 35acac9..aac87cb 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -30,7 +30,7 @@
  */
 
 extern int numa_cpu_lookup_table[];
-extern cpumask_t numa_cpumask_lookup_table[];
+extern cpumask_var_t node_to_cpumask_map[];
 #ifdef CONFIG_MEMORY_HOTPLUG
 extern unsigned long max_pfn;
 #endif
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 61913d9..e000cce 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -463,9 +463,6 @@
 /* Request IPIs on primary mpic */
 extern void mpic_request_ipis(void);
 
-/* Send an IPI (non offseted number 0..3) */
-extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
-
 /* Send a message (IPI) to a given target (cpu number or MSG_*) */
 void smp_mpic_message_pass(int target, int msg);
 
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a011603..971dfa4 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -82,6 +82,7 @@
 	s16 hw_cpu_id;			/* Physical processor number */
 	u8 cpu_start;			/* At startup, processor spins until */
 					/* this becomes non-zero. */
+	u8 kexec_state;		/* set when kexec down has irqs off */
 #ifdef CONFIG_PPC_STD_MMU_64
 	struct slb_shadow *slb_shadow_ptr;
 
diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h
index 94942d6..1ca1102 100644
--- a/arch/powerpc/include/asm/parport.h
+++ b/arch/powerpc/include/asm/parport.h
@@ -19,6 +19,8 @@
 	u32 io1, io2;
 	int propsize;
 	int count = 0;
+	int virq;
+
 	for (np = NULL; (np = of_find_compatible_node(np,
 						      "parallel",
 						      "pnpPNP,400")) != NULL;) {
@@ -26,10 +28,13 @@
 		if (!prop || propsize > 6*sizeof(u32))
 			continue;
 		io1 = prop[1]; io2 = prop[2];
-		prop = of_get_property(np, "interrupts", NULL);
-		if (!prop)
+
+		virq = irq_of_parse_and_map(np, 0);
+		if (virq == NO_IRQ)
 			continue;
-		if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL, 0) != NULL)
+
+		if (parport_pc_probe_port(io1, io2, virq, autodma, NULL, 0)
+				!= NULL)
 			count++;
 	}
 	return count;
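
The parport hunk above stops reading the raw "interrupts" property and instead maps the device-tree interrupt specifier to a Linux virtual IRQ with irq_of_parse_and_map(), skipping the port when no mapping exists. A minimal sketch of that same pattern outside parport, assuming a hypothetical "acme,foo" compatible string (header locations can vary between kernel versions):

	#include <linux/of.h>
	#include <linux/of_irq.h>
	#include <linux/irq.h>

	/* Count "acme,foo" nodes whose first interrupt specifier maps to a virq. */
	static int count_foo_devices(void)
	{
		struct device_node *np;
		int count = 0;

		for_each_compatible_node(np, NULL, "acme,foo") {
			unsigned int virq = irq_of_parse_and_map(np, 0);

			if (virq == NO_IRQ)	/* no usable interrupt specifier */
				continue;
			count++;
		}
		return count;
	}
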
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 605f5c5..292725c 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -11,6 +11,12 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 
+struct vmemmap_backing {
+	struct vmemmap_backing *list;
+	unsigned long phys;
+	unsigned long virt_addr;
+};
+
 /*
  * Functions that deal with pagetables that could be at any level of
  * the table need to be passed an "index_size" so they know how to
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 55646ad..a7db96f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -287,7 +287,7 @@
 #define pmd_page_vaddr(pmd)	\
 	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 #define pmd_page(pmd)		\
-	(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
+	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 #else
 #define pmd_page_vaddr(pmd)	\
 	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 9e2d84c..5d8be04 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -89,6 +89,7 @@
 
 #define instruction_pointer(regs) ((regs)->nip)
 #define user_stack_pointer(regs) ((regs)->gpr[1])
+#define kernel_stack_pointer(regs) ((regs)->gpr[1])
 #define regs_return_value(regs) ((regs)->gpr[3])
 
 #ifdef CONFIG_SMP
@@ -141,6 +142,69 @@
 #define arch_has_block_step()	(!cpu_has_feature(CPU_FTR_601))
 #define ARCH_HAS_USER_SINGLE_STEP_INFO
 
+/*
+ * kprobe-based event tracer support
+ */
+
+#include <linux/stddef.h>
+#include <linux/thread_info.h>
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, dsisr))
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs:	   pt_regs from which the register value is read
+ * @offset:    offset of the register within struct pt_regs.
+ *
+ * regs_get_register returns the value of the register stored at @offset
+ * within the struct pt_regs pointed to by @regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+						unsigned int offset)
+{
+	if (unlikely(offset > MAX_REG_OFFSET))
+		return 0;
+	return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs:      pt_regs which contains kernel stack pointer.
+ * @addr:      address which is checked.
+ *
+ * regs_within_kernel_stack() checks whether @addr lies within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true; otherwise it returns false.
+ */
+
+static inline bool regs_within_kernel_stack(struct pt_regs *regs,
+						unsigned long addr)
+{
+	return ((addr & ~(THREAD_SIZE - 1))  ==
+		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs:	pt_regs which contains kernel stack pointer.
+ * @n:		stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
+ * specified by @regs. If the @n-th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+						      unsigned int n)
+{
+	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+	addr += n;
+	if (regs_within_kernel_stack(regs, (unsigned long)addr))
+		return *addr;
+	else
+		return 0;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
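
The ptrace.h additions above give kprobe-based tracers a uniform way to read registers and kernel stack slots from a struct pt_regs. A minimal usage sketch, assuming a kprobe context and assuming "gpr3" is a register name known to regs_query_register_offset() (both are assumptions, not taken from this patch):

	#include <linux/kprobes.h>
	#include <linux/kernel.h>
	#include <asm/ptrace.h>

	/* Read one GPR and the top-of-stack word from a probed pt_regs. */
	static int demo_pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		int off = regs_query_register_offset("gpr3");
		unsigned long r3 = (off >= 0) ? regs_get_register(regs, off) : 0;
		unsigned long top = regs_get_kernel_stack_nth(regs, 0);

		pr_info("r3=%#lx stack[0]=%#lx\n", r3, top);
		return 0;
	}
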
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 5572e86..b68f025 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -817,6 +817,7 @@
 #define PVR_403GC	0x00200200
 #define PVR_403GCX	0x00201400
 #define PVR_405GP	0x40110000
+#define PVR_476		0x11a52000
 #define PVR_STB03XXX	0x40310000
 #define PVR_NP405H	0x41410000
 #define PVR_NP405L	0x41610000
@@ -853,6 +854,9 @@
 #define PVR_8245	0x80811014
 #define PVR_8260	PVR_8240
 
+/* 476 Simulator seems to currently have the PVR of the 602... */
+#define PVR_476_ISS	0x00052000
+
 /* 64-bit processors */
 /* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */
 #define PV_NORTHSTAR	0x0033
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 414d434..5304a37 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -191,6 +191,10 @@
 #define MCSR_DCFP	0x01000000 /* D-Cache Flush Parity Error */
 #define MCSR_IMPE	0x00800000 /* Imprecise Machine Check Exception */
 
+#define PPC47x_MCSR_GPR	0x01000000 /* GPR parity error */
+#define PPC47x_MCSR_FPR	0x00800000 /* FPR parity error */
+#define PPC47x_MCSR_IPR	0x00400000 /* Imprecise Machine Check Exception */
+
 #ifdef CONFIG_E500
 #define MCSR_MCP 	0x80000000UL /* Machine Check Input Pin */
 #define MCSR_ICPERR 	0x40000000UL /* I-Cache Parity Error */
@@ -604,5 +608,25 @@
 #define DBCR_JOI	0x00000002	/* JTAG Serial Outbound Int. Enable */
 #define DBCR_JII	0x00000001	/* JTAG Serial Inbound Int. Enable */
 #endif /* 403GCX */
+
+/* Some 476 specific registers */
+#define SPRN_SSPCR		830
+#define SPRN_USPCR		831
+#define SPRN_ISPCR		829
+#define SPRN_MMUBE0		820
+#define MMUBE0_IBE0_SHIFT	24
+#define MMUBE0_IBE1_SHIFT	16
+#define MMUBE0_IBE2_SHIFT	8
+#define MMUBE0_VBE0		0x00000004
+#define MMUBE0_VBE1		0x00000002
+#define MMUBE0_VBE2		0x00000001
+#define SPRN_MMUBE1		821
+#define MMUBE1_IBE3_SHIFT	24
+#define MMUBE1_IBE4_SHIFT	16
+#define MMUBE1_IBE5_SHIFT	8
+#define MMUBE1_VBE3		0x00000004
+#define MMUBE1_VBE4		0x00000002
+#define MMUBE1_VBE5		0x00000001
+
 #endif /* __ASM_POWERPC_REG_BOOKE_H__ */
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 1d3b270..66e237b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -40,7 +40,7 @@
 DECLARE_PER_CPU(unsigned int, cpu_pvr);
 
 #ifdef CONFIG_HOTPLUG_CPU
-extern void fixup_irqs(cpumask_t map);
+extern void fixup_irqs(const struct cpumask *map);
 int generic_cpu_disable(void);
 int generic_cpu_enable(unsigned int cpu);
 void generic_cpu_die(unsigned int cpu);
@@ -68,8 +68,19 @@
 }
 #endif
 
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+
+static inline struct cpumask *cpu_sibling_mask(int cpu)
+{
+	return per_cpu(cpu_sibling_map, cpu);
+}
+
+static inline struct cpumask *cpu_core_mask(int cpu)
+{
+	return per_cpu(cpu_core_map, cpu);
+}
+
 extern int cpu_to_core_id(int cpu);
 
 /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
@@ -93,7 +104,6 @@
 void smp_init_cell(void);
 void smp_init_celleb(void);
 void smp_setup_cpu_maps(void);
-void smp_setup_cpu_sibling_map(void);
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
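
With the smp.h change above, cpu_sibling_map and cpu_core_map become cpumask_var_t and are reached through the new accessors instead of taking the address of a per-CPU cpumask_t. An illustrative sketch of consuming code (function name and message are hypothetical, not from this patch):

	#include <linux/cpumask.h>
	#include <linux/kernel.h>
	#include <asm/smp.h>

	/* Walk the sibling mask of @cpu through the new accessor. */
	static void print_siblings(int cpu)
	{
		int i;

		for_each_cpu(i, cpu_sibling_mask(cpu))
			pr_info("CPU%d shares a core with CPU%d\n", i, cpu);
	}
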
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8eaec31..32adf72 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -8,6 +8,16 @@
 
 #ifdef CONFIG_NUMA
 
+/*
+ * Before going off node we want the VM to try and reclaim from the local
+ * node. It does this if the remote distance is larger than RECLAIM_DISTANCE.
+ * With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of
+ * 20, we never reclaim and go off node straight away.
+ *
+ * To fix this we choose a smaller value of RECLAIM_DISTANCE.
+ */
+#define RECLAIM_DISTANCE 10
+
 #include <asm/mmzone.h>
 
 static inline int cpu_to_node(int cpu)
@@ -19,7 +39,7 @@
 
 #define cpumask_of_node(node) ((node) == -1 ?				\
 			       cpu_all_mask :				\
-			       &numa_cpumask_lookup_table[node])
+			       node_to_cpumask_map[node])
 
 int of_node_to_nid(struct device_node *device);
 
@@ -102,8 +122,8 @@
 #ifdef CONFIG_PPC64
 #include <asm/smp.h>
 
-#define topology_thread_cpumask(cpu)	(&per_cpu(cpu_sibling_map, cpu))
-#define topology_core_cpumask(cpu)	(&per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)	(per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)	(per_cpu(cpu_core_map, cpu))
 #define topology_core_id(cpu)		(cpu_to_core_id(cpu))
 #endif
 #endif
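
The RECLAIM_DISTANCE comment added above refers to the page allocator's distance check: local reclaim is only attempted when a candidate zone's node is farther away than RECLAIM_DISTANCE. A rough sketch of that decision (not verbatim mm code), using the generic LOCAL_DISTANCE/REMOTE_DISTANCE defaults of 10 and 20:

	#include <linux/topology.h>

	/* With RECLAIM_DISTANCE lowered to 10, any off-node zone (distance 20)
	 * now counts as "far", so local reclaim is tried before going off node. */
	static int remote_node_is_far(int local_node, int remote_node)
	{
		return node_distance(local_node, remote_node) > RECLAIM_DISTANCE;
	}
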
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c09138d..28a686f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -183,6 +183,7 @@
 #endif /* CONFIG_PPC_STD_MMU_64 */
 	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
 	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+	DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
 	DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
 	DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
 	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
@@ -447,6 +448,14 @@
 	DEFINE(PGD_T_LOG2, PGD_T_LOG2);
 	DEFINE(PTE_T_LOG2, PTE_T_LOG2);
 #endif
+#ifdef CONFIG_FSL_BOOKE
+	DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
+	DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
+	DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
+	DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2));
+	DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
+	DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
+#endif
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 8af4949..9556be9 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1701,6 +1701,34 @@
 		.machine_check		= machine_check_440A,
 		.platform		= "ppc440",
 	},
+	{ /* 476 core */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x11a50000,
+		.cpu_name		= "476",
+		.cpu_features		= CPU_FTRS_47X,
+		.cpu_user_features	= COMMON_USER_BOOKE |
+			PPC_FEATURE_HAS_FPU,
+		.mmu_features		= MMU_FTR_TYPE_47x |
+			MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+		.icache_bsize		= 32,
+		.dcache_bsize		= 128,
+		.machine_check		= machine_check_47x,
+		.platform		= "ppc470",
+	},
+	{ /* 476 iss */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00050000,
+		.cpu_name		= "476",
+		.cpu_features		= CPU_FTRS_47X,
+		.cpu_user_features	= COMMON_USER_BOOKE |
+			PPC_FEATURE_HAS_FPU,
+		.mmu_features		= MMU_FTR_TYPE_47x |
+			MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+		.icache_bsize		= 32,
+		.dcache_bsize		= 128,
+		.machine_check		= machine_check_47x,
+		.platform		= "ppc470",
+	},
 	{	/* default match */
 		.pvr_mask		= 0x00000000,
 		.pvr_value		= 0x00000000,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 6f4613d..8c066d6 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -162,6 +162,32 @@
 	/* Leave the IPI callback set */
 }
 
+/* wait for all the CPUs to hit real mode but time out if they don't come in */
+static void crash_kexec_wait_realmode(int cpu)
+{
+	unsigned int msecs;
+	int i;
+
+	msecs = 10000;
+	for (i = 0; i < NR_CPUS && msecs > 0; i++) {
+		if (i == cpu)
+			continue;
+
+		while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
+			barrier();
+			if (!cpu_possible(i)) {
+				break;
+			}
+			if (!cpu_online(i)) {
+				break;
+			}
+			msecs--;
+			mdelay(1);
+		}
+	}
+	mb();
+}
+
 /*
  * This function will be called by secondary cpus or by kexec cpu
  * if soft-reset is activated to stop some CPUs.
@@ -347,10 +373,12 @@
 EXPORT_SYMBOL(crash_shutdown_unregister);
 
 static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
+static int crash_shutdown_cpu = -1;
 
 static int handle_fault(struct pt_regs *regs)
 {
-	longjmp(crash_shutdown_buf, 1);
+	if (crash_shutdown_cpu == smp_processor_id())
+		longjmp(crash_shutdown_buf, 1);
 	return 0;
 }
 
@@ -375,11 +403,14 @@
 	for_each_irq(i) {
 		struct irq_desc *desc = irq_to_desc(i);
 
+		if (!desc || !desc->chip || !desc->chip->eoi)
+			continue;
+
 		if (desc->status & IRQ_INPROGRESS)
 			desc->chip->eoi(i);
 
 		if (!(desc->status & IRQ_DISABLED))
-			desc->chip->disable(i);
+			desc->chip->shutdown(i);
 	}
 
 	/*
@@ -388,6 +419,7 @@
 	 */
 	old_handler = __debugger_fault_handler;
 	__debugger_fault_handler = handle_fault;
+	crash_shutdown_cpu = smp_processor_id();
 	for (i = 0; crash_shutdown_handles[i]; i++) {
 		if (setjmp(crash_shutdown_buf) == 0) {
 			/*
@@ -401,6 +433,7 @@
 			asm volatile("sync; isync");
 		}
 	}
+	crash_shutdown_cpu = -1;
 	__debugger_fault_handler = old_handler;
 
 	/*
@@ -412,6 +445,7 @@
 	crash_kexec_prepare_cpus(crashing_cpu);
 	cpu_set(crashing_cpu, cpus_in_crash);
 	crash_kexec_stop_spus();
+	crash_kexec_wait_realmode(crashing_cpu);
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(1, 0);
 }
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 1175a85..ed4aeb9 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -373,11 +373,13 @@
 	bnel-	load_dbcr0
 #endif
 #ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
 	lis	r4,icache_44x_need_flush@ha
 	lwz	r5,icache_44x_need_flush@l(r4)
 	cmplwi	cr0,r5,0
 	bne-	2f
 1:
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
 #endif /* CONFIG_44x */
 BEGIN_FTR_SECTION
 	lwarx	r7,0,r1
@@ -848,6 +850,9 @@
 	/* interrupts are hard-disabled at this point */
 restore:
 #ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
+	b	1f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
 	lis	r4,icache_44x_need_flush@ha
 	lwz	r5,icache_44x_need_flush@l(r4)
 	cmplwi	cr0,r5,0
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e3be98f..3e423fb 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -735,8 +735,11 @@
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
 
-	andis.	r0,r4,0xa450		/* weird error? */
+	andis.	r0,r4,0xa410		/* weird error? */
 	bne-	handle_page_fault	/* if not, try to insert a HPTE */
+	andis.  r0,r4,DSISR_DABRMATCH@h
+	bne-    handle_dabr_fault
+
 BEGIN_FTR_SECTION
 	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
 	bne-	do_ste_alloc		/* If so handle it */
@@ -823,6 +826,14 @@
 	bl	.raw_local_irq_restore
 	b	11f
 
+/* We have a data breakpoint exception - handle it */
+handle_dabr_fault:
+	ld      r4,_DAR(r1)
+	ld      r5,_DSISR(r1)
+	addi    r3,r1,STACK_FRAME_OVERHEAD
+	bl      .do_dabr
+	b       .ret_from_except_lite
+
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
 	ENABLE_INTS
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 711368b..5ab484e 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -37,6 +37,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/synch.h>
 #include "head_booke.h"
 
 
@@ -69,165 +70,7 @@
 	mr	r27,r7
 	li	r24,0		/* CPU number */
 
-/*
- * In case the firmware didn't do it, we apply some workarounds
- * that are good for all 440 core variants here
- */
-	mfspr	r3,SPRN_CCR0
-	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
-	isync
-	mtspr	SPRN_CCR0,r3
-	isync
-	sync
-
-/*
- * Set up the initial MMU state
- *
- * We are still executing code at the virtual address
- * mappings set by the firmware for the base of RAM.
- *
- * We first invalidate all TLB entries but the one
- * we are running from.  We then load the KERNELBASE
- * mappings so we can begin to use kernel addresses
- * natively and so the interrupt vector locations are
- * permanently pinned (necessary since Book E
- * implementations always have translation enabled).
- *
- * TODO: Use the known TLB entry we are running from to
- *	 determine which physical region we are located
- *	 in.  This can be used to determine where in RAM
- *	 (on a shared CPU system) or PCI memory space
- *	 (on a DRAMless system) we are located.
- *       For now, we assume a perfect world which means
- *	 we are located at the base of DRAM (physical 0).
- */
-
-/*
- * Search TLB for entry that we are currently using.
- * Invalidate all entries but the one we are using.
- */
-	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
-	mfspr	r3,SPRN_PID			/* Get PID */
-	mfmsr	r4				/* Get MSR */
-	andi.	r4,r4,MSR_IS@l			/* TS=1? */
-	beq	wmmucr				/* If not, leave STS=0 */
-	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
-wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
-	sync
-
-	bl	invstr				/* Find our address */
-invstr:	mflr	r5				/* Make it accessible */
-	tlbsx	r23,0,r5			/* Find entry we are in */
-	li	r4,0				/* Start at TLB entry 0 */
-	li	r3,0				/* Set PAGEID inval value */
-1:	cmpw	r23,r4				/* Is this our entry? */
-	beq	skpinv				/* If so, skip the inval */
-	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
-skpinv:	addi	r4,r4,1				/* Increment */
-	cmpwi	r4,64				/* Are we done? */
-	bne	1b				/* If not, repeat */
-	isync					/* If so, context change */
-
-/*
- * Configure and load pinned entry into TLB slot 63.
- */
-
-	lis	r3,PAGE_OFFSET@h
-	ori	r3,r3,PAGE_OFFSET@l
-
-	/* Kernel is at the base of RAM */
-	li r4, 0			/* Load the kernel physical address */
-
-	/* Load the kernel PID = 0 */
-	li	r0,0
-	mtspr	SPRN_PID,r0
-	sync
-
-	/* Initialize MMUCR */
-	li	r5,0
-	mtspr	SPRN_MMUCR,r5
-	sync
-
- 	/* pageid fields */
-	clrrwi	r3,r3,10		/* Mask off the effective page number */
-	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
-
-	/* xlat fields */
-	clrrwi	r4,r4,10		/* Mask off the real page number */
-					/* ERPN is 0 for first 4GB page */
-
-	/* attrib fields */
-	/* Added guarded bit to protect against speculative loads/stores */
-	li	r5,0
-	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
-
-        li      r0,63                    /* TLB slot 63 */
-
-	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
-	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
-	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
-
-	/* Force context change */
-	mfmsr	r0
-	mtspr	SPRN_SRR1, r0
-	lis	r0,3f@h
-	ori	r0,r0,3f@l
-	mtspr	SPRN_SRR0,r0
-	sync
-	rfi
-
-	/* If necessary, invalidate original entry we used */
-3:	cmpwi	r23,63
-	beq	4f
-	li	r6,0
-	tlbwe   r6,r23,PPC44x_TLB_PAGEID
-	isync
-
-4:
-#ifdef CONFIG_PPC_EARLY_DEBUG_44x
-	/* Add UART mapping for early debug. */
-
- 	/* pageid fields */
-	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
-	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
-
-	/* xlat fields */
-	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
-	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
-
-	/* attrib fields */
-	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
-        li      r0,62                    /* TLB slot 0 */
-
-	tlbwe	r3,r0,PPC44x_TLB_PAGEID
-	tlbwe	r4,r0,PPC44x_TLB_XLAT
-	tlbwe	r5,r0,PPC44x_TLB_ATTRIB
-
-	/* Force context change */
-	isync
-#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
-
-	/* Establish the interrupt vector offsets */
-	SET_IVOR(0,  CriticalInput);
-	SET_IVOR(1,  MachineCheck);
-	SET_IVOR(2,  DataStorage);
-	SET_IVOR(3,  InstructionStorage);
-	SET_IVOR(4,  ExternalInput);
-	SET_IVOR(5,  Alignment);
-	SET_IVOR(6,  Program);
-	SET_IVOR(7,  FloatingPointUnavailable);
-	SET_IVOR(8,  SystemCall);
-	SET_IVOR(9,  AuxillaryProcessorUnavailable);
-	SET_IVOR(10, Decrementer);
-	SET_IVOR(11, FixedIntervalTimer);
-	SET_IVOR(12, WatchdogTimer);
-	SET_IVOR(13, DataTLBError);
-	SET_IVOR(14, InstructionTLBError);
-	SET_IVOR(15, DebugCrit);
-
-	/* Establish the interrupt vector base */
-	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
-	mtspr	SPRN_IVPR,r4
+	bl	init_cpu_state
 
 	/*
 	 * This is where the main kernel code starts.
@@ -349,7 +192,7 @@
 #endif
 
 	/* Data TLB Error Interrupt */
-	START_EXCEPTION(DataTLBError)
+	START_EXCEPTION(DataTLBError44x)
 	mtspr	SPRN_SPRG_WSCRATCH0, r10		/* Save some working registers */
 	mtspr	SPRN_SPRG_WSCRATCH1, r11
 	mtspr	SPRN_SPRG_WSCRATCH2, r12
@@ -440,7 +283,7 @@
 	mfspr	r10,SPRN_DEAR
 
 	 /* Jump to common tlb load */
-	b	finish_tlb_load
+	b	finish_tlb_load_44x
 
 2:
 	/* The bailout.  Restore registers to pre-exception conditions
@@ -460,7 +303,7 @@
 	 * information from different registers and bailout
 	 * to a different point.
 	 */
-	START_EXCEPTION(InstructionTLBError)
+	START_EXCEPTION(InstructionTLBError44x)
 	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
 	mtspr	SPRN_SPRG_WSCRATCH1, r11
 	mtspr	SPRN_SPRG_WSCRATCH2, r12
@@ -536,7 +379,7 @@
 	mfspr	r10,SPRN_SRR0
 
 	/* Jump to common TLB load point */
-	b	finish_tlb_load
+	b	finish_tlb_load_44x
 
 2:
 	/* The bailout.  Restore registers to pre-exception conditions
@@ -550,15 +393,7 @@
 	mfspr	r10, SPRN_SPRG_RSCRATCH0
 	b	InstructionStorage
 
-	/* Debug Interrupt */
-	DEBUG_CRIT_EXCEPTION
-
 /*
- * Local functions
-  */
-
-/*
-
  * Both the instruction and data TLB miss get to this
  * point to load the TLB.
  * 	r10 - EA of fault
@@ -568,7 +403,7 @@
  *	MMUCR - loaded with proper value when we get here
  *	Upon exit, we reload everything and RFI.
  */
-finish_tlb_load:
+finish_tlb_load_44x:
 	/* Combine RPN & ERPN an write WS 0 */
 	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
 	tlbwe	r11,r13,PPC44x_TLB_XLAT
@@ -601,6 +436,227 @@
 	mfspr	r10, SPRN_SPRG_RSCRATCH0
 	rfi					/* Force context change */
 
+/* TLB error interrupts for 476 */
+#ifdef CONFIG_PPC_47x
+	START_EXCEPTION(DataTLBError47x)
+	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
+	mtspr	SPRN_SPRG_WSCRATCH1,r11
+	mtspr	SPRN_SPRG_WSCRATCH2,r12
+	mtspr	SPRN_SPRG_WSCRATCH3,r13
+	mfcr	r11
+	mtspr	SPRN_SPRG_WSCRATCH4,r11
+	mfspr	r10,SPRN_DEAR		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11,PAGE_OFFSET@h
+	cmplw	cr0,r10,r11
+	blt+	3f
+	lis	r11,swapper_pg_dir@h
+	ori	r11,r11, swapper_pg_dir@l
+	li	r12,0			/* MMUCR = 0 */
+	b	4f
+
+	/* Get the PGD for the current thread and setup MMUCR */
+3:	mfspr	r11,SPRN_SPRG3
+	lwz	r11,PGDIR(r11)
+	mfspr   r12,SPRN_PID		/* Get PID */
+4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
+
+	/* Mask of required permission bits. Note that while we
+	 * do copy ESR:ST to _PAGE_RW position as trying to write
+	 * to an RO page is pretty common, we don't do it with
+	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
+	 * event so I'd rather take the overhead when it happens
+	 * rather than adding an instruction here. We should measure
+	 * whether the whole thing is worth it in the first place
+	 * as we could avoid loading SPRN_ESR completely in the first
+	 * place...
+	 *
+	 * TODO: Is it worth doing that mfspr & rlwimi in the first
+	 *       place or can we save a couple of instructions here ?
+	 */
+	mfspr	r12,SPRN_ESR
+	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
+	rlwimi	r13,r12,10,30,30
+
+	/* Load the PTE */
+	/* Compute pgdir/pmd offset */
+	rlwinm  r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
+	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
+
+	/* Word 0 is EPN,V,TS,DSIZ */
+	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
+	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
+	li	r12,0
+	tlbwe	r10,r12,0
+
+	/* XXX can we do better ? Need to make sure tlbwe has established
+	 * latch V bit in MMUCR0 before the PTE is loaded further down */
+#ifdef CONFIG_SMP
+	isync
+#endif
+
+	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
+	/* Compute pte address */
+	rlwimi  r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
+	beq	2f			/* Bail if no table */
+	lwz	r11,0(r12)		/* Get high word of pte entry */
+
+	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
+	 * bottom of r12 to create a data dependency... We can also use r10
+	 * as destination nowadays
+	 */
+#ifdef CONFIG_SMP
+	lwsync
+#endif
+	lwz	r12,4(r12)		/* Get low word of pte entry */
+
+	andc.	r13,r13,r12		/* Check permission */
+
+	 /* Jump to common tlb load */
+	beq	finish_tlb_load_47x
+
+2:	/* The bailout.  Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+	mfspr	r11,SPRN_SPRG_RSCRATCH4
+	mtcr	r11
+	mfspr	r13,SPRN_SPRG_RSCRATCH3
+	mfspr	r12,SPRN_SPRG_RSCRATCH2
+	mfspr	r11,SPRN_SPRG_RSCRATCH1
+	mfspr	r10,SPRN_SPRG_RSCRATCH0
+	b	DataStorage
+
+	/* Instruction TLB Error Interrupt */
+	/*
+	 * Nearly the same as above, except we get our
+	 * information from different registers and bailout
+	 * to a different point.
+	 */
+	START_EXCEPTION(InstructionTLBError47x)
+	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
+	mtspr	SPRN_SPRG_WSCRATCH1,r11
+	mtspr	SPRN_SPRG_WSCRATCH2,r12
+	mtspr	SPRN_SPRG_WSCRATCH3,r13
+	mfcr	r11
+	mtspr	SPRN_SPRG_WSCRATCH4,r11
+	mfspr	r10,SPRN_SRR0		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11,PAGE_OFFSET@h
+	cmplw	cr0,r10,r11
+	blt+	3f
+	lis	r11,swapper_pg_dir@h
+	ori	r11,r11, swapper_pg_dir@l
+	li	r12,0			/* MMUCR = 0 */
+	b	4f
+
+	/* Get the PGD for the current thread and setup MMUCR */
+3:	mfspr	r11,SPRN_SPRG_THREAD
+	lwz	r11,PGDIR(r11)
+	mfspr   r12,SPRN_PID		/* Get PID */
+4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
+
+	/* Make up the required permissions */
+	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+
+	/* Load PTE */
+	/* Compute pgdir/pmd offset */
+	rlwinm  r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
+	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
+
+	/* Word 0 is EPN,V,TS,DSIZ */
+	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
+	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
+	li	r12,0
+	tlbwe	r10,r12,0
+
+	/* XXX can we do better ? Need to make sure tlbwe has established
+	 * latch V bit in MMUCR0 before the PTE is loaded further down */
+#ifdef CONFIG_SMP
+	isync
+#endif
+
+	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
+	/* Compute pte address */
+	rlwimi  r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
+	beq	2f			/* Bail if no table */
+
+	lwz	r11,0(r12)		/* Get high word of pte entry */
+	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
+	 * bottom of r12 to create a data dependency... We can also use r10
+	 * as destination nowadays
+	 */
+#ifdef CONFIG_SMP
+	lwsync
+#endif
+	lwz	r12,4(r12)		/* Get low word of pte entry */
+
+	andc.	r13,r13,r12		/* Check permission */
+
+	/* Jump to common TLB load point */
+	beq	finish_tlb_load_47x
+
+2:	/* The bailout.  Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+	mfspr	r11, SPRN_SPRG_RSCRATCH4
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG_RSCRATCH3
+	mfspr	r12, SPRN_SPRG_RSCRATCH2
+	mfspr	r11, SPRN_SPRG_RSCRATCH1
+	mfspr	r10, SPRN_SPRG_RSCRATCH0
+	b	InstructionStorage
+
+/*
+ * Both the instruction and data TLB miss get to this
+ * point to load the TLB.
+ * 	r10 - free to use
+ * 	r11 - PTE high word value
+ *	r12 - PTE low word value
+ *      r13 - free to use
+ *	MMUCR - loaded with proper value when we get here
+ *	Upon exit, we reload everything and RFI.
+ */
+finish_tlb_load_47x:
+	/* Combine RPN & ERPN and write WS 1 */
+	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
+	tlbwe	r11,r13,1
+
+	/* And make up word 2 */
+	li	r10,0xf85			/* Mask to apply from PTE */
+	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
+	and	r11,r12,r10			/* Mask PTE bits to keep */
+	andi.	r10,r12,_PAGE_USER		/* User page ? */
+	beq	1f				/* nope, leave U bits empty */
+	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
+1:	tlbwe	r11,r13,2
+
+	/* Done...restore registers and get out of here.
+	*/
+	mfspr	r11, SPRN_SPRG_RSCRATCH4
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG_RSCRATCH3
+	mfspr	r12, SPRN_SPRG_RSCRATCH2
+	mfspr	r11, SPRN_SPRG_RSCRATCH1
+	mfspr	r10, SPRN_SPRG_RSCRATCH0
+	rfi
+
+#endif /* CONFIG_PPC_47x */
+
+	/* Debug Interrupt */
+	/*
+	 * This statement needs to exist at the end of the IVPR
+	 * definition just in case you end up taking a debug
+	 * exception within another exception.
+	 */
+	DEBUG_CRIT_EXCEPTION
+
 /*
  * Global functions
  */
@@ -647,6 +703,428 @@
 	blr
 
 /*
+ * Init CPU state. This is called at boot time or for secondary CPUs
+ * to set up initial TLB entries, set up IVORs, etc.
+ *
+ */
+_GLOBAL(init_cpu_state)
+	mflr	r22
+#ifdef CONFIG_PPC_47x
+	/* We use the PVR to differentiate 44x cores from 476 */
+	mfspr	r3,SPRN_PVR
+	srwi	r3,r3,16
+	cmplwi	cr0,r3,PVR_476@h
+	beq	head_start_47x
+	cmplwi	cr0,r3,PVR_476_ISS@h
+	beq	head_start_47x
+#endif /* CONFIG_PPC_47x */
+
+/*
+ * In case the firmware didn't do it, we apply some workarounds
+ * that are good for all 440 core variants here
+ */
+	mfspr	r3,SPRN_CCR0
+	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
+	isync
+	mtspr	SPRN_CCR0,r3
+	isync
+	sync
+
+/*
+ * Set up the initial MMU state for 44x
+ *
+ * We are still executing code at the virtual address
+ * mappings set by the firmware for the base of RAM.
+ *
+ * We first invalidate all TLB entries but the one
+ * we are running from.  We then load the KERNELBASE
+ * mappings so we can begin to use kernel addresses
+ * natively and so the interrupt vector locations are
+ * permanently pinned (necessary since Book E
+ * implementations always have translation enabled).
+ *
+ * TODO: Use the known TLB entry we are running from to
+ *	 determine which physical region we are located
+ *	 in.  This can be used to determine where in RAM
+ *	 (on a shared CPU system) or PCI memory space
+ *	 (on a DRAMless system) we are located.
+ *       For now, we assume a perfect world which means
+ *	 we are located at the base of DRAM (physical 0).
+ */
+
+/*
+ * Search TLB for entry that we are currently using.
+ * Invalidate all entries but the one we are using.
+ */
+	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
+	mfspr	r3,SPRN_PID			/* Get PID */
+	mfmsr	r4				/* Get MSR */
+	andi.	r4,r4,MSR_IS@l			/* TS=1? */
+	beq	wmmucr				/* If not, leave STS=0 */
+	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
+wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
+	sync
+
+	bl	invstr				/* Find our address */
+invstr:	mflr	r5				/* Make it accessible */
+	tlbsx	r23,0,r5			/* Find entry we are in */
+	li	r4,0				/* Start at TLB entry 0 */
+	li	r3,0				/* Set PAGEID inval value */
+1:	cmpw	r23,r4				/* Is this our entry? */
+	beq	skpinv				/* If so, skip the inval */
+	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
+skpinv:	addi	r4,r4,1				/* Increment */
+	cmpwi	r4,64				/* Are we done? */
+	bne	1b				/* If not, repeat */
+	isync					/* If so, context change */
+
+/*
+ * Configure and load pinned entry into TLB slot 63.
+ */
+
+	lis	r3,PAGE_OFFSET@h
+	ori	r3,r3,PAGE_OFFSET@l
+
+	/* Kernel is at the base of RAM */
+	li r4, 0			/* Load the kernel physical address */
+
+	/* Load the kernel PID = 0 */
+	li	r0,0
+	mtspr	SPRN_PID,r0
+	sync
+
+	/* Initialize MMUCR */
+	li	r5,0
+	mtspr	SPRN_MMUCR,r5
+	sync
+
+	/* pageid fields */
+	clrrwi	r3,r3,10		/* Mask off the effective page number */
+	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
+
+	/* xlat fields */
+	clrrwi	r4,r4,10		/* Mask off the real page number */
+					/* ERPN is 0 for first 4GB page */
+
+	/* attrib fields */
+	/* Added guarded bit to protect against speculative loads/stores */
+	li	r5,0
+	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
+
+        li      r0,63                    /* TLB slot 63 */
+
+	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
+	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
+	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
+
+	/* Force context change */
+	mfmsr	r0
+	mtspr	SPRN_SRR1, r0
+	lis	r0,3f@h
+	ori	r0,r0,3f@l
+	mtspr	SPRN_SRR0,r0
+	sync
+	rfi
+
+	/* If necessary, invalidate original entry we used */
+3:	cmpwi	r23,63
+	beq	4f
+	li	r6,0
+	tlbwe   r6,r23,PPC44x_TLB_PAGEID
+	isync
+
+4:
+#ifdef CONFIG_PPC_EARLY_DEBUG_44x
+	/* Add UART mapping for early debug. */
+
+	/* pageid fields */
+	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
+	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
+
+	/* xlat fields */
+	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
+	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
+
+	/* attrib fields */
+	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
+        li      r0,62                    /* TLB slot 0 */
+
+	tlbwe	r3,r0,PPC44x_TLB_PAGEID
+	tlbwe	r4,r0,PPC44x_TLB_XLAT
+	tlbwe	r5,r0,PPC44x_TLB_ATTRIB
+
+	/* Force context change */
+	isync
+#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+
+	/* Establish the interrupt vector offsets */
+	SET_IVOR(0,  CriticalInput);
+	SET_IVOR(1,  MachineCheck);
+	SET_IVOR(2,  DataStorage);
+	SET_IVOR(3,  InstructionStorage);
+	SET_IVOR(4,  ExternalInput);
+	SET_IVOR(5,  Alignment);
+	SET_IVOR(6,  Program);
+	SET_IVOR(7,  FloatingPointUnavailable);
+	SET_IVOR(8,  SystemCall);
+	SET_IVOR(9,  AuxillaryProcessorUnavailable);
+	SET_IVOR(10, Decrementer);
+	SET_IVOR(11, FixedIntervalTimer);
+	SET_IVOR(12, WatchdogTimer);
+	SET_IVOR(13, DataTLBError44x);
+	SET_IVOR(14, InstructionTLBError44x);
+	SET_IVOR(15, DebugCrit);
+
+	b	head_start_common
+
+
+#ifdef CONFIG_PPC_47x
+
+#ifdef CONFIG_SMP
+
+/* Entry point for secondary 47x processors */
+_GLOBAL(start_secondary_47x)
+        mr      r24,r3          /* CPU number */
+
+	bl	init_cpu_state
+
+	/* Now we need to bolt the rest of kernel memory which
+	 * is done in C code. We must be careful because our task
+	 * struct or our stack can (and will probably) be out
+	 * of reach of the initial 256M TLB entry, so we use a
+	 * small temporary stack in .bss for that. This works
+	 * because only one CPU at a time can be in this code
+	 */
+	lis	r1,temp_boot_stack@h
+	ori	r1,r1,temp_boot_stack@l
+	addi	r1,r1,1024-STACK_FRAME_OVERHEAD
+	li	r0,0
+	stw	r0,0(r1)
+	bl	mmu_init_secondary
+
+	/* Now we can get our task struct and real stack pointer */
+
+	/* Get current_thread_info and current */
+	lis	r1,secondary_ti@ha
+	lwz	r1,secondary_ti@l(r1)
+	lwz	r2,TI_TASK(r1)
+
+	/* Current stack pointer */
+	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+	li	r0,0
+	stw	r0,0(r1)
+
+	/* Kernel stack for exception entry in SPRG3 */
+	addi	r4,r2,THREAD	/* init task's THREAD */
+	mtspr	SPRN_SPRG3,r4
+
+	b	start_secondary
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Set up the initial MMU state for 44x
+ *
+ * We are still executing code at the virtual address
+ * mappings set by the firmware for the base of RAM.
+ */
+
+head_start_47x:
+	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
+	mfspr	r3,SPRN_PID			/* Get PID */
+	mfmsr	r4				/* Get MSR */
+	andi.	r4,r4,MSR_IS@l			/* TS=1? */
+	beq	1f				/* If not, leave STS=0 */
+	oris	r3,r3,PPC47x_MMUCR_STS@h	/* Set STS=1 */
+1:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
+	sync
+
+	/* Find the entry we are running from */
+	bl	1f
+1:	mflr	r23
+	tlbsx	r23,0,r23
+	tlbre	r24,r23,0
+	tlbre	r25,r23,1
+	tlbre	r26,r23,2
+
+/*
+ * Cleanup time
+ */
+
+	/* Initialize MMUCR */
+	li	r5,0
+	mtspr	SPRN_MMUCR,r5
+	sync
+
+clear_all_utlb_entries:
+
+	#; Set initial values.
+
+	addis		r3,0,0x8000
+	addi		r4,0,0
+	addi		r5,0,0
+	b		clear_utlb_entry
+
+	#; Align the loop to speed things up.
+
+	.align		6
+
+clear_utlb_entry:
+
+	tlbwe		r4,r3,0
+	tlbwe		r5,r3,1
+	tlbwe		r5,r3,2
+	addis		r3,r3,0x2000
+	cmpwi		r3,0
+	bne		clear_utlb_entry
+	addis		r3,0,0x8000
+	addis		r4,r4,0x100
+	cmpwi		r4,0
+	bne		clear_utlb_entry
+
+	#; Restore original entry.
+
+	oris	r23,r23,0x8000  /* specify the way */
+	tlbwe		r24,r23,0
+	tlbwe		r25,r23,1
+	tlbwe		r26,r23,2
+
+/*
+ * Configure and load pinned entry into TLB for the kernel core
+ */
+
+	lis	r3,PAGE_OFFSET@h
+	ori	r3,r3,PAGE_OFFSET@l
+
+	/* Kernel is at the base of RAM */
+	li r4, 0			/* Load the kernel physical address */
+
+	/* Load the kernel PID = 0 */
+	li	r0,0
+	mtspr	SPRN_PID,r0
+	sync
+
+	/* Word 0 */
+	clrrwi	r3,r3,12		/* Mask off the effective page number */
+	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
+
+	/* Word 1 */
+	clrrwi	r4,r4,12		/* Mask off the real page number */
+					/* ERPN is 0 for first 4GB page */
+	/* Word 2 */
+	li	r5,0
+	ori	r5,r5,PPC47x_TLB2_S_RWX
+#ifdef CONFIG_SMP
+	ori	r5,r5,PPC47x_TLB2_M
+#endif
+
+	/* We write to way 0 and bolted 0 */
+	lis	r0,0x8800
+	tlbwe	r3,r0,0
+	tlbwe	r4,r0,1
+	tlbwe	r5,r0,2
+
+/*
+ * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix
+ * them up later
+ */
+	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
+	mtspr	SPRN_SSPCR,r3
+	mtspr	SPRN_USPCR,r3
+	LOAD_REG_IMMEDIATE(r3, 0x12345670)
+	mtspr	SPRN_ISPCR,r3
+
+	/* Force context change */
+	mfmsr	r0
+	mtspr	SPRN_SRR1, r0
+	lis	r0,3f@h
+	ori	r0,r0,3f@l
+	mtspr	SPRN_SRR0,r0
+	sync
+	rfi
+
+	/* Invalidate original entry we used */
+3:
+	rlwinm	r24,r24,0,21,19 /* clear the "valid" bit */
+	tlbwe	r24,r23,0
+	addi	r24,0,0
+	tlbwe	r24,r23,1
+	tlbwe	r24,r23,2
+	isync                   /* Clear out the shadow TLB entries */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_44x
+	/* Add UART mapping for early debug. */
+
+	/* Word 0 */
+	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
+	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M
+
+	/* Word 1 */
+	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
+	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
+
+	/* Word 2 */
+	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)
+
+	/* Bolted in way 0, bolt slot 5. We -hope- we don't hit the same
+	 * congruence class as the kernel; we need to make sure of that at
+	 * some point
+	 */
+        lis	r0,0x8d00
+	tlbwe	r3,r0,0
+	tlbwe	r4,r0,1
+	tlbwe	r5,r0,2
+
+	/* Force context change */
+	isync
+#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+
+	/* Establish the interrupt vector offsets */
+	SET_IVOR(0,  CriticalInput);
+	SET_IVOR(1,  MachineCheckA);
+	SET_IVOR(2,  DataStorage);
+	SET_IVOR(3,  InstructionStorage);
+	SET_IVOR(4,  ExternalInput);
+	SET_IVOR(5,  Alignment);
+	SET_IVOR(6,  Program);
+	SET_IVOR(7,  FloatingPointUnavailable);
+	SET_IVOR(8,  SystemCall);
+	SET_IVOR(9,  AuxillaryProcessorUnavailable);
+	SET_IVOR(10, Decrementer);
+	SET_IVOR(11, FixedIntervalTimer);
+	SET_IVOR(12, WatchdogTimer);
+	SET_IVOR(13, DataTLBError47x);
+	SET_IVOR(14, InstructionTLBError47x);
+	SET_IVOR(15, DebugCrit);
+
+	/* We configure icbi to invalidate 128 bytes at a time since the
+	 * current 32-bit kernel code isn't too happy with icache != dcache
+	 * block size
+	 */
+	mfspr	r3,SPRN_CCR0
+	oris	r3,r3,0x0020
+	mtspr	SPRN_CCR0,r3
+	isync
+
+#endif /* CONFIG_PPC_47x */
+
+/*
+ * Here we are back to code that is common between 44x and 47x
+ *
+ * We proceed to further kernel initialization and return to the
+ * main kernel entry
+ */
+head_start_common:
+	/* Establish the interrupt vector base */
+	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
+	mtspr	SPRN_IVPR,r4
+
+	addis	r22,r22,KERNELBASE@h
+	mtlr	r22
+	isync
+	blr
+
+/*
  * We put a few things here that have to be page-aligned. This stuff
  * goes at the beginning of the data segment, which is page-aligned.
  */
@@ -671,3 +1149,9 @@
  */
 abatron_pteptrs:
 	.space	8
+
+#ifdef CONFIG_SMP
+	.align	12
+temp_boot_stack:
+	.space	1024
+#endif /* CONFIG_SMP */
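
The 47x data TLB miss handler above builds a mask of required permission bits (_PAGE_PRESENT | _PAGE_ACCESSED, plus the write-permission bit copied from ESR:ST on a store) and then uses a single andc. to test that every required bit is present in the PTE, branching to finish_tlb_load_47x only when nothing is missing. A minimal user-space sketch of that check, using made-up bit values rather than the kernel's real _PAGE_* definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values only -- the real _PAGE_* bits are defined in the
     * powerpc pgtable headers, not here. */
    #define PAGE_PRESENT  0x001u
    #define PAGE_ACCESSED 0x002u
    #define PAGE_RW       0x004u

    /* Mirror of "andc. r13,r13,r12": the fast path succeeds only if every
     * required bit is already set in the PTE. */
    static int tlb_permission_ok(uint32_t pte, int is_store)
    {
        uint32_t required = PAGE_PRESENT | PAGE_ACCESSED;

        if (is_store)
            required |= PAGE_RW;    /* ESR:ST copied into the _PAGE_RW position */

        return (required & ~pte) == 0;
    }

    int main(void)
    {
        uint32_t ro_pte = PAGE_PRESENT | PAGE_ACCESSED;

        printf("read  on RO page: %s\n", tlb_permission_ok(ro_pte, 0) ? "ok" : "fault");
        printf("write on RO page: %s\n", tlb_permission_ok(ro_pte, 1) ? "ok" : "fault");
        return 0;
    }

When the check fails, the handler restores the scratch registers and bails out to DataStorage instead of loading a TLB entry.
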
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 3ef743f..1f1a04b 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -71,9 +71,6 @@
  * in the first level table, but that would require many changes to the
  * Linux page directory/table functions that I don't want to do right now.
  *
- * I used to use SPRG2 for a temporary register in the TLB handler, but it
- * has since been put to other uses.  I now use a hack to save a register
- * and the CCR at memory location 0.....Someday I'll fix this.....
  *	-- Dan
  */
 	.globl	__start
@@ -302,8 +299,13 @@
 	DO_8xx_CPU6(0x3f80, r3)
 	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
 	mfcr	r10
+#ifdef CONFIG_8xx_CPU6
 	stw	r10, 0(r0)
 	stw	r11, 4(r0)
+#else
+	mtspr	SPRN_DAR, r10
+	mtspr	SPRN_SPRG2, r11
+#endif
 	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
 #ifdef CONFIG_8xx_CPU15
 	addi	r11, r10, 0x1000
@@ -318,12 +320,16 @@
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
+#ifdef CONFIG_MODULES
+	/* Only modules will cause ITLB Misses as we always
+	 * pin the first 8MB of kernel memory */
 	andi.	r11, r10, 0x0800	/* Address >= 0x80000000 */
 	beq	3f
 	lis	r11, swapper_pg_dir@h
 	ori	r11, r11, swapper_pg_dir@l
 	rlwimi	r10, r11, 0, 2, 19
 3:
+#endif
 	lwz	r11, 0(r10)	/* Get the level 1 entry */
 	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
 	beq	2f		/* If zero, don't try to find a pte */
@@ -339,31 +345,35 @@
 	mfspr	r11, SPRN_MD_TWC	/* ....and get the pte address */
 	lwz	r10, 0(r11)	/* Get the pte */
 
+#ifdef CONFIG_SWAP
 	andi.	r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT
 	cmpwi	cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT
 	bne-	cr0, 2f
-
-	/* Clear PP lsb, 0x400 */
-	rlwinm 	r10, r10, 0, 22, 20
-
+#endif
 	/* The Linux PTE won't go exactly into the MMU TLB.
-	 * Software indicator bits 22 and 28 must be clear.
+	 * Software indicator bits 21 and 28 must be clear.
 	 * Software indicator bits 24, 25, 26, and 27 must be
 	 * set.  All other Linux PTE bits control the behavior
 	 * of the MMU.
 	 */
 	li	r11, 0x00f0
-	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
+	rlwimi	r10, r11, 0, 0x07f8	/* Set 24-27, clear 21-23,28 */
 	DO_8xx_CPU6(0x2d80, r3)
 	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
 
-	mfspr	r10, SPRN_M_TW	/* Restore registers */
+	/* Restore registers */
+#ifndef CONFIG_8xx_CPU6
+	mfspr	r10, SPRN_DAR
+	mtcr	r10
+	mtspr	SPRN_DAR, r11	/* Tag DAR */
+	mfspr	r11, SPRN_SPRG2
+#else
 	lwz	r11, 0(r0)
 	mtcr	r11
 	lwz	r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
 	lwz	r3, 8(r0)
 #endif
+	mfspr	r10, SPRN_M_TW
 	rfi
 2:
 	mfspr	r11, SPRN_SRR1
@@ -373,13 +383,20 @@
 	rlwinm	r11, r11, 0, 0xffff
 	mtspr	SPRN_SRR1, r11
 
-	mfspr	r10, SPRN_M_TW	/* Restore registers */
+	/* Restore registers */
+#ifndef CONFIG_8xx_CPU6
+	mfspr	r10, SPRN_DAR
+	mtcr	r10
+	li	r11, 0x00f0
+	mtspr	SPRN_DAR, r11	/* Tag DAR */
+	mfspr	r11, SPRN_SPRG2
+#else
 	lwz	r11, 0(r0)
 	mtcr	r11
 	lwz	r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
 	lwz	r3, 8(r0)
 #endif
+	mfspr	r10, SPRN_M_TW
 	b	InstructionAccess
 
 	. = 0x1200
@@ -390,8 +407,13 @@
 	DO_8xx_CPU6(0x3f80, r3)
 	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
 	mfcr	r10
+#ifdef CONFIG_8xx_CPU6
 	stw	r10, 0(r0)
 	stw	r11, 4(r0)
+#else
+	mtspr	SPRN_DAR, r10
+	mtspr	SPRN_SPRG2, r11
+#endif
 	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */
 
 	/* If we are faulting a kernel address, we have to use the
@@ -438,15 +460,14 @@
 	 * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
 	 * r10 = (r10 & ~PRESENT) | r11;
 	 */
+#ifdef CONFIG_SWAP
 	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
 	and	r11, r11, r10
 	rlwimi	r10, r11, 0, _PAGE_PRESENT
-
+#endif
 	/* Honour kernel RO, User NA */
 	/* 0x200 == Extended encoding, bit 22 */
-	/* r11 =  (r10 & _PAGE_USER) >> 2 */
-	rlwinm	r11, r10, 32-2, 0x200
-	or	r10, r11, r10
+	rlwimi	r10, r10, 32-2, 0x200 /* Copy USER to bit 22, 0x200 */
 	/* r11 =  (r10 & _PAGE_RW) >> 1 */
 	rlwinm	r11, r10, 32-1, 0x200
 	or	r10, r11, r10
@@ -460,18 +481,24 @@
 	 * of the MMU.
 	 */
 2:	li	r11, 0x00f0
-	mtspr	SPRN_DAR,r11	/* Tag DAR */
 	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
 	DO_8xx_CPU6(0x3d80, r3)
 	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 
-	mfspr	r10, SPRN_M_TW	/* Restore registers */
+	/* Restore registers */
+#ifndef CONFIG_8xx_CPU6
+	mfspr	r10, SPRN_DAR
+	mtcr	r10
+	mtspr	SPRN_DAR, r11	/* Tag DAR */
+	mfspr	r11, SPRN_SPRG2
+#else
+	mtspr	SPRN_DAR, r11	/* Tag DAR */
 	lwz	r11, 0(r0)
 	mtcr	r11
 	lwz	r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
 	lwz	r3, 8(r0)
 #endif
+	mfspr	r10, SPRN_M_TW
 	rfi
 
 /* This is an instruction TLB error on the MPC8xx.  This could be due
@@ -683,9 +710,6 @@
 	tophys(r4,r2)
 	addi	r4,r4,THREAD	/* init task's THREAD */
 	mtspr	SPRN_SPRG_THREAD,r4
-	li	r3,0
-	/* XXX What is that for ? SPRG2 appears otherwise unused on 8xx */
-	mtspr	SPRN_SPRG2,r3	/* 0 => r1 has kernel sp */
 
 	/* stack */
 	lis	r1,init_thread_union@ha
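
In the 8xx ITLB miss path the software indicator bits are now fixed up with one masked insert, rlwimi r10, r11, 0, 0x07f8 with r11 = 0x00f0, which sets bits 24-27 and clears bits 21-23 and 28 in a single instruction. A masked insert of src into dst under mask is just dst = (dst & ~mask) | (src & mask); a small standalone illustration with a hypothetical PTE value:

    #include <stdint.h>
    #include <stdio.h>

    /* rlwimi dst,src,0,mask  ==  dst = (dst & ~mask) | (src & mask) */
    static uint32_t masked_insert(uint32_t dst, uint32_t src, uint32_t mask)
    {
        return (dst & ~mask) | (src & mask);
    }

    int main(void)
    {
        uint32_t pte = 0x12345708;    /* hypothetical PTE with bits 21-23 and 28 set */
        uint32_t fixed = masked_insert(pte, 0x00f0, 0x07f8);

        /* 0x0700 and 0x0008 are cleared, 0x00f0 is set, all other bits untouched */
        printf("before 0x%08x  after 0x%08x\n", pte, fixed);
        return 0;
    }

In PowerPC big-endian bit numbering, bits 21-23 and 28 of a 32-bit word are the 0x0700 and 0x0008 values and bits 24-27 are 0x00f0, which is how the 0x07f8 mask lines up with the "Set 24-27, clear 21-23,28" comment.
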
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 50504ae..a0bf158 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -1,6 +1,7 @@
 #ifndef __HEAD_BOOKE_H__
 #define __HEAD_BOOKE_H__
 
+#include <asm/ptrace.h>	/* for STACK_FRAME_REGS_MARKER */
 /*
  * Macros used for common Book-e exception handling
  */
@@ -48,6 +49,9 @@
 	stw	r10,0(r11);						     \
 	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
 	stw	r0,GPR0(r11);						     \
+	lis	r10, STACK_FRAME_REGS_MARKER@ha;/* exception frame marker */ \
+	addi	r10, r10, STACK_FRAME_REGS_MARKER@l;			     \
+	stw	r10, 8(r11);						     \
 	SAVE_4GPRS(3, r11);						     \
 	SAVE_2GPRS(7, r11)
 
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 7255265..edd4a57 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -639,6 +639,13 @@
 	rlwinm	r12,r12,0,16,1
 	mtspr	SPRN_MAS1,r12
 
+	/* Make up the required permissions for kernel code */
+#ifdef CONFIG_PTE_64BIT
+	li	r13,_PAGE_PRESENT | _PAGE_BAP_SX
+	oris	r13,r13,_PAGE_ACCESSED@h
+#else
+	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+#endif
 	b	4f
 
 	/* Get the PGD for the current thread */
@@ -646,15 +653,15 @@
 	mfspr	r11,SPRN_SPRG_THREAD
 	lwz	r11,PGDIR(r11)
 
-4:
-	/* Make up the required permissions */
+	/* Make up the required permissions for user code */
 #ifdef CONFIG_PTE_64BIT
-	li	r13,_PAGE_PRESENT | _PAGE_EXEC
+	li	r13,_PAGE_PRESENT | _PAGE_BAP_UX
 	oris	r13,r13,_PAGE_ACCESSED@h
 #else
 	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
 #endif
 
+4:
 	FIND_PTE
 	andc.	r13,r13,r11		/* Check permission */
 
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ec94f90..d583917 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -43,20 +43,9 @@
 #define DBG(...)
 
 static int novmerge;
-static int protect4gb = 1;
 
 static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
 
-static int __init setup_protect4gb(char *str)
-{
-	if (strcmp(str, "on") == 0)
-		protect4gb = 1;
-	else if (strcmp(str, "off") == 0)
-		protect4gb = 0;
-
-	return 1;
-}
-
 static int __init setup_iommu(char *str)
 {
 	if (!strcmp(str, "novmerge"))
@@ -66,7 +55,6 @@
 	return 1;
 }
 
-__setup("protect4gb=", setup_protect4gb);
 __setup("iommu=", setup_iommu);
 
 static unsigned long iommu_range_alloc(struct device *dev,
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 066bd31..30817d9 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -284,30 +284,33 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(cpumask_t map)
+void fixup_irqs(const struct cpumask *map)
 {
 	struct irq_desc *desc;
 	unsigned int irq;
 	static int warned;
+	cpumask_var_t mask;
+
+	alloc_cpumask_var(&mask, GFP_KERNEL);
 
 	for_each_irq(irq) {
-		cpumask_t mask;
-
 		desc = irq_to_desc(irq);
 		if (desc && desc->status & IRQ_PER_CPU)
 			continue;
 
-		cpumask_and(&mask, desc->affinity, &map);
-		if (any_online_cpu(mask) == NR_CPUS) {
+		cpumask_and(mask, desc->affinity, map);
+		if (cpumask_any(mask) >= nr_cpu_ids) {
 			printk("Breaking affinity for irq %i\n", irq);
-			mask = map;
+			cpumask_copy(mask, map);
 		}
 		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, &mask);
+			desc->chip->set_affinity(irq, mask);
 		else if (desc->action && !(warned++))
 			printk("Cannot set affinity for irq %i\n", irq);
 	}
 
+	free_cpumask_var(mask);
+
 	local_irq_enable();
 	mdelay(1);
 	local_irq_disable();
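
fixup_irqs() now takes a const struct cpumask * and a heap-allocated scratch mask: it intersects each descriptor's affinity with the supplied map (the online CPUs at the call sites in smp.c), and if the intersection comes up empty (cpumask_any() >= nr_cpu_ids) it breaks affinity and falls back to the whole map. The same decision with a plain 64-bit word standing in for struct cpumask (a sketch, not the kernel API):

    #include <stdint.h>
    #include <stdio.h>

    /* Pick a new affinity mask for an irq when CPUs are going away: keep the
     * intersection with the surviving map if it is non-empty, otherwise break
     * affinity and use the whole map. */
    static uint64_t fixup_affinity(uint64_t irq_affinity, uint64_t map, int irq)
    {
        uint64_t mask = irq_affinity & map;    /* cpumask_and(mask, affinity, map) */

        if (mask == 0) {                       /* cpumask_any(mask) >= nr_cpu_ids */
            printf("Breaking affinity for irq %d\n", irq);
            mask = map;                        /* cpumask_copy(mask, map) */
        }
        return mask;                           /* chip->set_affinity(irq, mask) */
    }

    int main(void)
    {
        uint64_t online = 0x0f;                /* CPUs 0-3 stay online */

        printf("irq 17 -> %#llx\n", (unsigned long long)fixup_affinity(0x30, online, 17));
        printf("irq 18 -> %#llx\n", (unsigned long long)fixup_affinity(0x06, online, 18));
        return 0;
    }
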
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 41bada0..82a7b22 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -20,6 +20,7 @@
 #include <linux/smp.h>
 #include <linux/signal.h>
 #include <linux/ptrace.h>
+#include <linux/kdebug.h>
 #include <asm/current.h>
 #include <asm/processor.h>
 #include <asm/machdep.h>
@@ -115,7 +116,8 @@
 /* KGDB functions to use existing PowerPC64 hooks. */
 static int kgdb_debugger(struct pt_regs *regs)
 {
-	return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs);
+	return !kgdb_handle_exception(1, computeSignal(TRAP(regs)),
+				      DIE_OOPS, regs);
 }
 
 static int kgdb_handle_breakpoint(struct pt_regs *regs)
@@ -123,7 +125,7 @@
 	if (user_mode(regs))
 		return 0;
 
-	if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
+	if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
 		return 0;
 
 	if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
@@ -309,6 +311,11 @@
 	       (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
 }
 
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+	regs->nip = pc;
+}
+
 /*
  * This function does PowerPC specific procesing for interfacing to gdb.
  */
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index b36f074..c533525 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -114,6 +114,9 @@
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	regs->msr &= ~MSR_CE;
 	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+#ifdef CONFIG_PPC_47x
+	isync();
+#endif
 #endif
 
 	/*
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index c2c70e1..50362b6 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -38,7 +38,7 @@
 #include <asm/vio.h>
 #include <asm/mmu.h>
 
-#define MODULE_VERS "1.8"
+#define MODULE_VERS "1.9"
 #define MODULE_NAME "lparcfg"
 
 /* #define LPARCFG_DEBUG */
@@ -487,6 +487,14 @@
 	seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions);
 }
 
+static void parse_em_data(struct seq_file *m)
+{
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+	if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+		seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
+}
+
 static int pseries_lparcfg_data(struct seq_file *m, void *v)
 {
 	int partition_potential_processors;
@@ -541,6 +549,8 @@
 
 	seq_printf(m, "slb_size=%d\n", mmu_slb_size);
 
+	parse_em_data(m);
+
 	return 0;
 }
 
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 040bd1d..26f9900 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -155,33 +155,38 @@
 
 #ifdef CONFIG_SMP
 
-/* FIXME: we should schedule this function to be called on all cpus based
- * on calling the interrupts, but we would like to call it off irq level
- * so that the interrupt controller is clean.
- */
+static int kexec_all_irq_disabled = 0;
+
 static void kexec_smp_down(void *arg)
 {
+	local_irq_disable();
+	mb(); /* make sure our irqs are disabled before we say they are */
+	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+	while(kexec_all_irq_disabled == 0)
+		cpu_relax();
+	mb(); /* make sure all irqs are disabled before this */
+	/*
+	 * Now every CPU has IRQs off, we can clear out any pending
+	 * IPIs and be sure that no more will come in after this.
+	 */
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(0, 1);
 
-	local_irq_disable();
 	kexec_smp_wait();
 	/* NOTREACHED */
 }
 
-static void kexec_prepare_cpus(void)
+static void kexec_prepare_cpus_wait(int wait_state)
 {
 	int my_cpu, i, notified=-1;
 
-	smp_call_function(kexec_smp_down, NULL, /* wait */0);
 	my_cpu = get_cpu();
-
-	/* check the others cpus are now down (via paca hw cpu id == -1) */
+	/* Make sure each CPU has at least made it to the state we need */
 	for (i=0; i < NR_CPUS; i++) {
 		if (i == my_cpu)
 			continue;
 
-		while (paca[i].hw_cpu_id != -1) {
+		while (paca[i].kexec_state < wait_state) {
 			barrier();
 			if (!cpu_possible(i)) {
 				printk("kexec: cpu %d hw_cpu_id %d is not"
@@ -201,20 +206,35 @@
 			}
 			if (i != notified) {
 				printk( "kexec: waiting for cpu %d (physical"
-						" %d) to go down\n",
-						i, paca[i].hw_cpu_id);
+						" %d) to enter %i state\n",
+					i, paca[i].hw_cpu_id, wait_state);
 				notified = i;
 			}
 		}
 	}
+	mb();
+}
+
+static void kexec_prepare_cpus(void)
+{
+
+	smp_call_function(kexec_smp_down, NULL, /* wait */0);
+	local_irq_disable();
+	mb(); /* make sure IRQs are disabled before we say they are */
+	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+
+	kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
+	/* we are sure every CPU has IRQs off at this point */
+	kexec_all_irq_disabled = 1;
 
 	/* after we tell the others to go down */
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(0, 0);
 
-	put_cpu();
+	/* Before removing MMU mappings, make sure all CPUs have entered real mode */
+	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);
 
-	local_irq_disable();
+	put_cpu();
 }
 
 #else /* ! SMP */
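
The kexec shutdown sequence above is a two-stage rendezvous: each secondary CPU disables interrupts and publishes KEXEC_STATE_IRQS_OFF in its paca, the CPU driving the kexec waits for everyone to get there, sets kexec_all_irq_disabled, and then waits again until all CPUs have advanced to KEXEC_STATE_REAL_MODE before the MMU mappings are torn down. A user-space sketch of the same pattern with C11 atomics and pthreads (illustrative only; the kernel spins with cpu_relax() on per-CPU paca fields):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NCPUS 4
    enum { STATE_NONE, STATE_IRQS_OFF, STATE_REAL_MODE };

    static atomic_int cpu_state[NCPUS];     /* stands in for paca[i].kexec_state */
    static atomic_int all_irqs_disabled;    /* stands in for kexec_all_irq_disabled */

    static void *secondary(void *arg)
    {
        int cpu = (int)(long)arg;

        /* local_irq_disable(); publish KEXEC_STATE_IRQS_OFF */
        atomic_store(&cpu_state[cpu], STATE_IRQS_OFF);

        while (!atomic_load(&all_irqs_disabled))
            ;                                /* cpu_relax() */

        /* ...IPIs quiesced, drop to real mode... */
        atomic_store(&cpu_state[cpu], STATE_REAL_MODE);
        return NULL;
    }

    static void wait_for_state(int wait_state)
    {
        for (int i = 1; i < NCPUS; i++)      /* CPU 0 is the one driving kexec */
            while (atomic_load(&cpu_state[i]) < wait_state)
                ;
    }

    int main(void)
    {
        pthread_t t[NCPUS];

        for (int i = 1; i < NCPUS; i++)
            pthread_create(&t[i], NULL, secondary, (void *)(long)i);

        wait_for_state(STATE_IRQS_OFF);
        atomic_store(&all_irqs_disabled, 1);
        wait_for_state(STATE_REAL_MODE);
        printf("all CPUs quiesced, safe to tear down the MMU mappings\n");

        for (int i = 1; i < NCPUS; i++)
            pthread_join(t[i], NULL);
        return 0;
    }

Build with -pthread; thread 0 plays the part of the CPU running kexec_prepare_cpus().
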
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 8649f53..8043d1b 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -441,7 +441,7 @@
 	addi	r3,r3,L1_CACHE_BYTES
 	bdnz	0b
 	sync
-#ifndef CONFIG_44x
+#ifdef CONFIG_44x
 	/* We don't flush the icache on 44x. Those have a virtual icache
 	 * and we don't have access to the virtual address here (it's
 	 * not the page vaddr but where it's mapped in user space). The
@@ -449,15 +449,19 @@
 	 * a change in the address space occurs, before returning to
 	 * user space
 	 */
+BEGIN_MMU_FTR_SECTION
+	blr
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
+#endif /* CONFIG_44x */
 	mtctr	r4
 1:	icbi	0,r6
 	addi	r6,r6,L1_CACHE_BYTES
 	bdnz	1b
 	sync
 	isync
-#endif /* CONFIG_44x */
 	blr
 
+#ifndef CONFIG_BOOKE
 /*
  * Flush a particular page from the data cache to RAM, identified
  * by its physical address.  We turn off the MMU so we can just use
@@ -490,6 +494,7 @@
 	mtmsr	r10				/* restore DR */
 	isync
 	blr
+#endif /* CONFIG_BOOKE */
 
 /*
  * Clear pages using the dcbz instruction, which doesn't cause any
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index a5cf9c1..a2b18df 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -24,6 +24,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/cputable.h>
 #include <asm/thread_info.h>
+#include <asm/kexec.h>
 
 	.text
 
@@ -471,6 +472,10 @@
 1:	mflr	r5
 	addi	r5,r5,kexec_flag-1b
 
+	li	r4,KEXEC_STATE_REAL_MODE
+	stb	r4,PACAKEXECSTATE(r13)
+	SYNC
+
 99:	HMT_LOW
 #ifdef CONFIG_KEXEC		/* use no memory without kexec */
 	lwz	r4,0(r5)
@@ -494,14 +499,11 @@
  * note: this is a terminal routine, it does not save lr
  *
  * get phys id from paca
- * set paca id to -1 to say we got here
  * switch to real mode
  * join other cpus in kexec_wait(phys_id)
  */
 _GLOBAL(kexec_smp_wait)
 	lhz	r3,PACAHWCPUID(r13)
-	li	r4,-1
-	sth	r4,PACAHWCPUID(r13)	/* let others know we left */
 	bl	real_mode
 	b	.kexec_wait
 
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 0c40c6f..f88acf0 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -18,6 +18,7 @@
 #include <asm/pgtable.h>
 #include <asm/iseries/lpar_map.h>
 #include <asm/iseries/hv_types.h>
+#include <asm/kexec.h>
 
 /* This symbol is provided by the linker - let it fill in the paca
  * field correctly */
@@ -97,6 +98,7 @@
 	new_paca->kernelbase = (unsigned long) _stext;
 	new_paca->kernel_msr = MSR_KERNEL;
 	new_paca->hw_cpu_id = 0xffff;
+	new_paca->kexec_state = KEXEC_STATE_NONE;
 	new_paca->__current = &init_task;
 #ifdef CONFIG_PPC_STD_MMU_64
 	new_paca->slb_shadow_ptr = &slb_shadow[cpu];
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index cd11d5c..6ddb795f 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -310,6 +310,8 @@
 	/* Scan direct children */
 	for_each_child_of_node(node, child) {
 		pr_debug("  * %s\n", child->full_name);
+		if (!of_device_is_available(child))
+			continue;
 		reg = of_get_property(child, "reg", &reglen);
 		if (reg == NULL || reglen < 20)
 			continue;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e4d71ce..9d255b4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -371,6 +371,9 @@
 	/* XXX should we have a CPU_FTR_HAS_DABR ? */
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	mtspr(SPRN_DAC1, dabr);
+#ifdef CONFIG_PPC_47x
+	isync();
+#endif
 #elif defined(CONFIG_PPC_BOOK3S)
 	mtspr(SPRN_DABR, dabr);
 #endif
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index ed2cfe1..7a0c0199 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -39,6 +39,109 @@
 #include <asm/system.h>
 
 /*
+ * The parameter save area on the stack is used to store arguments being passed
+ * to the callee function and is located at a fixed offset from the stack pointer.
+ */
+#ifdef CONFIG_PPC32
+#define PARAMETER_SAVE_AREA_OFFSET	24  /* bytes */
+#else /* CONFIG_PPC32 */
+#define PARAMETER_SAVE_AREA_OFFSET	48  /* bytes */
+#endif
+
+struct pt_regs_offset {
+	const char *name;
+	int offset;
+};
+
+#define STR(s)	#s			/* convert to string */
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define GPR_OFFSET_NAME(num)	\
+	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+	GPR_OFFSET_NAME(0),
+	GPR_OFFSET_NAME(1),
+	GPR_OFFSET_NAME(2),
+	GPR_OFFSET_NAME(3),
+	GPR_OFFSET_NAME(4),
+	GPR_OFFSET_NAME(5),
+	GPR_OFFSET_NAME(6),
+	GPR_OFFSET_NAME(7),
+	GPR_OFFSET_NAME(8),
+	GPR_OFFSET_NAME(9),
+	GPR_OFFSET_NAME(10),
+	GPR_OFFSET_NAME(11),
+	GPR_OFFSET_NAME(12),
+	GPR_OFFSET_NAME(13),
+	GPR_OFFSET_NAME(14),
+	GPR_OFFSET_NAME(15),
+	GPR_OFFSET_NAME(16),
+	GPR_OFFSET_NAME(17),
+	GPR_OFFSET_NAME(18),
+	GPR_OFFSET_NAME(19),
+	GPR_OFFSET_NAME(20),
+	GPR_OFFSET_NAME(21),
+	GPR_OFFSET_NAME(22),
+	GPR_OFFSET_NAME(23),
+	GPR_OFFSET_NAME(24),
+	GPR_OFFSET_NAME(25),
+	GPR_OFFSET_NAME(26),
+	GPR_OFFSET_NAME(27),
+	GPR_OFFSET_NAME(28),
+	GPR_OFFSET_NAME(29),
+	GPR_OFFSET_NAME(30),
+	GPR_OFFSET_NAME(31),
+	REG_OFFSET_NAME(nip),
+	REG_OFFSET_NAME(msr),
+	REG_OFFSET_NAME(ctr),
+	REG_OFFSET_NAME(link),
+	REG_OFFSET_NAME(xer),
+	REG_OFFSET_NAME(ccr),
+#ifdef CONFIG_PPC64
+	REG_OFFSET_NAME(softe),
+#else
+	REG_OFFSET_NAME(mq),
+#endif
+	REG_OFFSET_NAME(trap),
+	REG_OFFSET_NAME(dar),
+	REG_OFFSET_NAME(dsisr),
+	REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name:	the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+	const struct pt_regs_offset *roff;
+	for (roff = regoffset_table; roff->name != NULL; roff++)
+		if (!strcmp(roff->name, name))
+			return roff->offset;
+	return -EINVAL;
+}
+
+/**
+ * regs_query_register_name() - query register name from its offset
+ * @offset:	the offset of a register in struct pt_regs.
+ *
+ * regs_query_register_name() returns the name of a register from its
+ * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
+ */
+const char *regs_query_register_name(unsigned int offset)
+{
+	const struct pt_regs_offset *roff;
+	for (roff = regoffset_table; roff->name != NULL; roff++)
+		if (roff->offset == offset)
+			return roff->name;
+	return NULL;
+}
+
+/*
  * does not yet catch signals sent when the child dies.
  * in exit.c or in signal.c.
  */
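
regs_query_register_offset() and regs_query_register_name() give in-kernel users (such as the kprobes event code) a two-way, table-driven mapping between register names and their offsets inside struct pt_regs. A standalone sketch of the same idea over a cut-down register structure (not the kernel's pt_regs layout):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct fake_pt_regs {
        unsigned long gpr[4];
        unsigned long nip;
        unsigned long msr;
    };

    struct reg_offset { const char *name; int offset; };

    static const struct reg_offset table[] = {
        { "gpr0", offsetof(struct fake_pt_regs, gpr[0]) },
        { "gpr1", offsetof(struct fake_pt_regs, gpr[1]) },
        { "nip",  offsetof(struct fake_pt_regs, nip)    },
        { "msr",  offsetof(struct fake_pt_regs, msr)    },
        { NULL, 0 },
    };

    static int query_offset(const char *name)          /* regs_query_register_offset() */
    {
        for (const struct reg_offset *r = table; r->name; r++)
            if (!strcmp(r->name, name))
                return r->offset;
        return -1;                                      /* the kernel returns -EINVAL */
    }

    static const char *query_name(int offset)           /* regs_query_register_name() */
    {
        for (const struct reg_offset *r = table; r->name; r++)
            if (r->offset == offset)
                return r->name;
        return NULL;
    }

    int main(void)
    {
        int off = query_offset("nip");

        printf("nip lives at offset %d, which maps back to \"%s\"\n", off, query_name(off));
        return 0;
    }
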
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 7436784..0e1ec6f 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -691,10 +691,14 @@
 {
 	int status;
 
-	if (panic_timeout)
-		return;
-
-	if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term"))
+	/*
+	 * Firmware with the ibm,extended-os-term property is guaranteed
+	 * to always return from an ibm,os-term call. Earlier versions without
+	 * this property may terminate the partition, which we want to avoid
+	 * since it interferes with panic_timeout.
+	 */
+	if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
+	    RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
 		return;
 
 	snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
@@ -705,8 +709,7 @@
 	} while (rtas_busy_delay(status));
 
 	if (status != 0)
-		printk(KERN_EMERG "ibm,os-term call failed %d\n",
-			       status);
+		printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
 }
 
 static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 4190eae..638883e 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -411,9 +411,9 @@
 
 	get_online_cpus();
 
-	cpu = next_cpu(smp_processor_id(), cpu_online_map);
-	if (cpu == NR_CPUS) {
-		cpu = first_cpu(cpu_online_map);
+	cpu = cpumask_next(smp_processor_id(), cpu_online_mask);
+        if (cpu >= nr_cpu_ids) {
+		cpu = cpumask_first(cpu_online_mask);
 
 		if (first_pass) {
 			first_pass = 0;
@@ -466,8 +466,8 @@
 	/* Retreive errors from nvram if any */
 	retreive_nvram_error_log();
 
-	schedule_delayed_work_on(first_cpu(cpu_online_map), &event_scan_work,
-				 event_scan_delay);
+	schedule_delayed_work_on(cpumask_first(cpu_online_mask),
+				 &event_scan_work, event_scan_delay);
 }
 
 static int __init rtas_init(void)
@@ -490,6 +490,12 @@
 		return -ENODEV;
 	}
 
+	if (!rtas_event_scan_rate) {
+		/* Broken firmware: take a rate of zero to mean don't scan */
+		printk(KERN_DEBUG "rtasd: scan rate is 0, not scanning\n");
+		return 0;
+	}
+
 	/* Make room for the sequence number */
 	rtas_error_log_max = rtas_get_error_log_max();
 	rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int);
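
The event scan is bounced round-robin across online CPUs: cpumask_next() finds the next online CPU after the current one and, when it runs off the end (>= nr_cpu_ids), the scan wraps back to cpumask_first(). The wrap-around with a bitmask standing in for cpu_online_mask (a sketch, not the kernel cpumask API):

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPU_IDS 8

    /* Smallest online CPU strictly greater than "cpu", or NR_CPU_IDS if none. */
    static int next_online(int cpu, uint32_t online)
    {
        for (int i = cpu + 1; i < NR_CPU_IDS; i++)
            if (online & (1u << i))
                return i;
        return NR_CPU_IDS;
    }

    int main(void)
    {
        uint32_t online = 0x2d;               /* CPUs 0, 2, 3 and 5 are online */
        int cpu = next_online(-1, online);    /* cpumask_first() */

        for (int pass = 0; pass < 6; pass++) {
            printf("event scan on cpu %d\n", cpu);
            cpu = next_online(cpu, online);   /* cpumask_next() */
            if (cpu >= NR_CPU_IDS)            /* ran off the end: wrap around */
                cpu = next_online(-1, online);
        }
        return 0;
    }
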
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 48f0a00..5e4d852 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -161,6 +161,38 @@
 DEFINE_PER_CPU(unsigned int, cpu_pvr);
 #endif
 
+static void show_cpuinfo_summary(struct seq_file *m)
+{
+	struct device_node *root;
+	const char *model = NULL;
+#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
+	unsigned long bogosum = 0;
+	int i;
+	for_each_online_cpu(i)
+		bogosum += loops_per_jiffy;
+	seq_printf(m, "total bogomips\t: %lu.%02lu\n",
+		   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
+#endif /* CONFIG_SMP && CONFIG_PPC32 */
+	seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
+	if (ppc_md.name)
+		seq_printf(m, "platform\t: %s\n", ppc_md.name);
+	root = of_find_node_by_path("/");
+	if (root)
+		model = of_get_property(root, "model", NULL);
+	if (model)
+		seq_printf(m, "model\t\t: %s\n", model);
+	of_node_put(root);
+
+	if (ppc_md.show_cpuinfo != NULL)
+		ppc_md.show_cpuinfo(m);
+
+#ifdef CONFIG_PPC32
+	/* Display the amount of memory */
+	seq_printf(m, "Memory\t\t: %d MB\n",
+		   (unsigned int)(total_memory / (1024 * 1024)));
+#endif
+}
+
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
 	unsigned long cpu_id = (unsigned long)v - 1;
@@ -168,39 +200,6 @@
 	unsigned short maj;
 	unsigned short min;
 
-	if (cpu_id == NR_CPUS) {
-		struct device_node *root;
-		const char *model = NULL;
-#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
-		unsigned long bogosum = 0;
-		int i;
-		for_each_online_cpu(i)
-			bogosum += loops_per_jiffy;
-		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
-			   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
-#endif /* CONFIG_SMP && CONFIG_PPC32 */
-		seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
-		if (ppc_md.name)
-			seq_printf(m, "platform\t: %s\n", ppc_md.name);
-		root = of_find_node_by_path("/");
-		if (root)
-			model = of_get_property(root, "model", NULL);
-		if (model)
-			seq_printf(m, "model\t\t: %s\n", model);
-		of_node_put(root);
-
-		if (ppc_md.show_cpuinfo != NULL)
-			ppc_md.show_cpuinfo(m);
-
-#ifdef CONFIG_PPC32
-		/* Display the amount of memory */
-		seq_printf(m, "Memory\t\t: %d MB\n",
-			   (unsigned int)(total_memory / (1024 * 1024)));
-#endif
-
-		return 0;
-	}
-
 	/* We only show online cpus: disable preempt (overzealous, I
 	 * knew) to prevent cpu going down. */
 	preempt_disable();
@@ -308,19 +307,28 @@
 #endif
 
 	preempt_enable();
+
+	/* If this is the last cpu, print the summary */
+	if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
+		show_cpuinfo_summary(m);
+
 	return 0;
 }
 
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
-	unsigned long i = *pos;
-
-	return i <= NR_CPUS ? (void *)(i + 1) : NULL;
+	if (*pos == 0)	/* just in case, cpu 0 is not the first */
+		*pos = cpumask_first(cpu_online_mask);
+	else
+		*pos = cpumask_next(*pos - 1, cpu_online_mask);
+	if ((*pos) < nr_cpu_ids)
+		return (void *)(unsigned long)(*pos + 1);
+	return NULL;
 }
 
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	++*pos;
+	(*pos)++;
 	return c_start(m, pos);
 }
 
@@ -386,14 +394,14 @@
 
 /**
  * setup_cpu_maps - initialize the following cpu maps:
- *                  cpu_possible_map
- *                  cpu_present_map
+ *                  cpu_possible_mask
+ *                  cpu_present_mask
  *
  * Having the possible map set up early allows us to restrict allocations
  * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
  *
  * We do not initialize the online map here; cpus set their own bits in
- * cpu_online_map as they come up.
+ * cpu_online_mask as they come up.
  *
  * This function is valid only for Open Firmware systems.  finish_device_tree
  * must be called before using this.
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 9143891..f3fb5a7 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -424,9 +424,18 @@
 	DBG(" <- setup_system()\n");
 }
 
+static u64 slb0_limit(void)
+{
+	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+		return 1UL << SID_SHIFT_1T;
+	}
+	return 1UL << SID_SHIFT;
+}
+
 #ifdef CONFIG_IRQSTACKS
 static void __init irqstack_early_init(void)
 {
+	u64 limit = slb0_limit();
 	unsigned int i;
 
 	/*
@@ -436,10 +445,10 @@
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
 			__va(lmb_alloc_base(THREAD_SIZE,
-					    THREAD_SIZE, 0x10000000));
+					    THREAD_SIZE, limit));
 		hardirq_ctx[i] = (struct thread_info *)
 			__va(lmb_alloc_base(THREAD_SIZE,
-					    THREAD_SIZE, 0x10000000));
+					    THREAD_SIZE, limit));
 	}
 }
 #else
@@ -470,7 +479,7 @@
  */
 static void __init emergency_stack_init(void)
 {
-	unsigned long limit;
+	u64 limit;
 	unsigned int i;
 
 	/*
@@ -482,7 +491,7 @@
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
 	 */
-	limit = min(0x10000000ULL, lmb.rmo_size);
+	limit = min(slb0_limit(), lmb.rmo_size);
 
 	for_each_possible_cpu(i) {
 		unsigned long sp;
@@ -573,12 +582,6 @@
 	printk("[boot]%04x %s\n", src, msg);
 }
 
-void cpu_die(void)
-{
-	if (ppc_md.cpu_die)
-		ppc_md.cpu_die();
-}
-
 #ifdef CONFIG_SMP
 #define PCPU_DYN_SIZE		()
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index c2ee144..5c196d1 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -59,8 +59,8 @@
 
 struct thread_info *secondary_ti;
 
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
-DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
+DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
@@ -271,6 +271,16 @@
 	smp_store_cpu_info(boot_cpuid);
 	cpu_callin_map[boot_cpuid] = 1;
 
+	for_each_possible_cpu(cpu) {
+		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
+					GFP_KERNEL, cpu_to_node(cpu));
+		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
+					GFP_KERNEL, cpu_to_node(cpu));
+	}
+
+	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
+	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
+
 	if (smp_ops)
 		if (smp_ops->probe)
 			max_cpus = smp_ops->probe();
@@ -289,10 +299,6 @@
 void __devinit smp_prepare_boot_cpu(void)
 {
 	BUG_ON(smp_processor_id() != boot_cpuid);
-
-	set_cpu_online(boot_cpuid, true);
-	cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
-	cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
 #ifdef CONFIG_PPC64
 	paca[boot_cpuid].__current = current;
 #endif
@@ -313,7 +319,7 @@
 	set_cpu_online(cpu, false);
 #ifdef CONFIG_PPC64
 	vdso_data->processorCount--;
-	fixup_irqs(cpu_online_map);
+	fixup_irqs(cpu_online_mask);
 #endif
 	return 0;
 }
@@ -333,7 +339,7 @@
 		cpu_relax();
 
 #ifdef CONFIG_PPC64
-	fixup_irqs(cpu_online_map);
+	fixup_irqs(cpu_online_mask);
 	/* counter the irq disable in fixup_irqs */
 	local_irq_enable();
 #endif
@@ -462,7 +468,7 @@
 	return id;
 }
 
-/* Must be called when no change can occur to cpu_present_map,
+/* Must be called when no change can occur to cpu_present_mask,
  * i.e. during cpu online or offline.
  */
 static struct device_node *cpu_to_l2cache(int cpu)
@@ -495,6 +501,14 @@
 	current->active_mm = &init_mm;
 
 	smp_store_cpu_info(cpu);
+
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+	/* Clear any pending timer interrupts */
+	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
+
+	/* Enable decrementer interrupt */
+	mtspr(SPRN_TCR, TCR_DIE);
+#endif
 	set_dec(tb_ticks_per_jiffy);
 	preempt_disable();
 	cpu_callin_map[cpu] = 1;
@@ -517,15 +531,15 @@
 	for (i = 0; i < threads_per_core; i++) {
 		if (cpu_is_offline(base + i))
 			continue;
-		cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
-		cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
+		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));
 
 		/* cpu_core_map should be a superset of
 		 * cpu_sibling_map even if we don't have cache
 		 * information, so update the former here, too.
 		 */
-		cpu_set(cpu, per_cpu(cpu_core_map, base +i));
-		cpu_set(base + i, per_cpu(cpu_core_map, cpu));
+		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
+		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
 	}
 	l2_cache = cpu_to_l2cache(cpu);
 	for_each_online_cpu(i) {
@@ -533,8 +547,8 @@
 		if (!np)
 			continue;
 		if (np == l2_cache) {
-			cpu_set(cpu, per_cpu(cpu_core_map, i));
-			cpu_set(i, per_cpu(cpu_core_map, cpu));
+			cpumask_set_cpu(cpu, cpu_core_mask(i));
+			cpumask_set_cpu(i, cpu_core_mask(cpu));
 		}
 		of_node_put(np);
 	}
@@ -554,19 +568,22 @@
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-	cpumask_t old_mask;
+	cpumask_var_t old_mask;
 
 	/* We want the setup_cpu() here to be called from CPU 0, but our
 	 * init thread may have been "borrowed" by another CPU in the meantime
 	 * se we pin us down to CPU 0 for a short while
 	 */
-	old_mask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));
+	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
+	cpumask_copy(old_mask, &current->cpus_allowed);
+	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
 	
 	if (smp_ops && smp_ops->setup_cpu)
 		smp_ops->setup_cpu(boot_cpuid);
 
-	set_cpus_allowed(current, old_mask);
+	set_cpus_allowed_ptr(current, old_mask);
+
+	free_cpumask_var(old_mask);
 
 	snapshot_timebases();
 
@@ -591,10 +608,10 @@
 	/* Update sibling maps */
 	base = cpu_first_thread_in_core(cpu);
 	for (i = 0; i < threads_per_core; i++) {
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
-		cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
-		cpu_clear(cpu, per_cpu(cpu_core_map, base +i));
-		cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
+		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
+		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
+		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
+		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
 	}
 
 	l2_cache = cpu_to_l2cache(cpu);
@@ -603,8 +620,8 @@
 		if (!np)
 			continue;
 		if (np == l2_cache) {
-			cpu_clear(cpu, per_cpu(cpu_core_map, i));
-			cpu_clear(i, per_cpu(cpu_core_map, cpu));
+			cpumask_clear_cpu(cpu, cpu_core_mask(i));
+			cpumask_clear_cpu(i, cpu_core_mask(cpu));
 		}
 		of_node_put(np);
 	}
@@ -631,4 +648,10 @@
 {
 	mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
 }
+
+void cpu_die(void)
+{
+	if (ppc_md.cpu_die)
+		ppc_md.cpu_die();
+}
 #endif
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index e235e52..c0d8c20 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -35,7 +35,7 @@
 #ifdef CONFIG_PPC64
 
 /* Time in microseconds we delay before sleeping in the idle loop */
-DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = { 100 };
+DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
 
 static ssize_t store_smt_snooze_delay(struct sys_device *dev,
 				      struct sysdev_attribute *attr,
@@ -44,9 +44,9 @@
 {
 	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
 	ssize_t ret;
-	unsigned long snooze;
+	long snooze;
 
-	ret = sscanf(buf, "%lu", &snooze);
+	ret = sscanf(buf, "%ld", &snooze);
 	if (ret != 1)
 		return -EINVAL;
 
@@ -61,53 +61,23 @@
 {
 	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
 
-	return sprintf(buf, "%lu\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
+	return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
 }
 
 static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
 		   store_smt_snooze_delay);
 
-/* Only parse OF options if the matching cmdline option was not specified */
-static int smt_snooze_cmdline;
-
-static int __init smt_setup(void)
-{
-	struct device_node *options;
-	const unsigned int *val;
-	unsigned int cpu;
-
-	if (!cpu_has_feature(CPU_FTR_SMT))
-		return -ENODEV;
-
-	options = of_find_node_by_path("/options");
-	if (!options)
-		return -ENODEV;
-
-	val = of_get_property(options, "ibm,smt-snooze-delay", NULL);
-	if (!smt_snooze_cmdline && val) {
-		for_each_possible_cpu(cpu)
-			per_cpu(smt_snooze_delay, cpu) = *val;
-	}
-
-	of_node_put(options);
-	return 0;
-}
-__initcall(smt_setup);
-
 static int __init setup_smt_snooze_delay(char *str)
 {
 	unsigned int cpu;
-	int snooze;
+	long snooze;
 
 	if (!cpu_has_feature(CPU_FTR_SMT))
 		return 1;
 
-	smt_snooze_cmdline = 1;
-
-	if (get_option(&str, &snooze)) {
-		for_each_possible_cpu(cpu)
-			per_cpu(smt_snooze_delay, cpu) = snooze;
-	}
+	snooze = simple_strtol(str, NULL, 10);
+	for_each_possible_cpu(cpu)
+		per_cpu(smt_snooze_delay, cpu) = snooze;
 
 	return 1;
 }
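
smt_snooze_delay and both of its parsers (the sysfs store and the command-line hook) switch from unsigned long to signed long, so a value such as -1 survives the round trip instead of being reinterpreted as a huge positive delay. A tiny standalone illustration of the difference (strtol/strtoul stand in for the kernel's simple_strtol and the old %lu scanning):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *arg = "-1";       /* e.g. smt-snooze-delay=-1 on the command line */

        long snooze = strtol(arg, NULL, 10);            /* the patch uses simple_strtol() */
        unsigned long old_style = strtoul(arg, NULL, 10);

        printf("as signed long:   %ld\n", snooze);
        printf("as unsigned long: %lu (what the old %%lu parsing would hand back)\n", old_style);
        return 0;
    }
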
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 29d128e..3031fc7 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -380,6 +380,46 @@
 	}
 	return 0;
 }
+
+int machine_check_47x(struct pt_regs *regs)
+{
+	unsigned long reason = get_mc_reason(regs);
+	u32 mcsr;
+
+	printk(KERN_ERR "Machine check in kernel mode.\n");
+	if (reason & ESR_IMCP) {
+		printk(KERN_ERR
+		       "Instruction Synchronous Machine Check exception\n");
+		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+		return 0;
+	}
+	mcsr = mfspr(SPRN_MCSR);
+	if (mcsr & MCSR_IB)
+		printk(KERN_ERR "Instruction Read PLB Error\n");
+	if (mcsr & MCSR_DRB)
+		printk(KERN_ERR "Data Read PLB Error\n");
+	if (mcsr & MCSR_DWB)
+		printk(KERN_ERR "Data Write PLB Error\n");
+	if (mcsr & MCSR_TLBP)
+		printk(KERN_ERR "TLB Parity Error\n");
+	if (mcsr & MCSR_ICP) {
+		flush_instruction_cache();
+		printk(KERN_ERR "I-Cache Parity Error\n");
+	}
+	if (mcsr & MCSR_DCSP)
+		printk(KERN_ERR "D-Cache Search Parity Error\n");
+	if (mcsr & PPC47x_MCSR_GPR)
+		printk(KERN_ERR "GPR Parity Error\n");
+	if (mcsr & PPC47x_MCSR_FPR)
+		printk(KERN_ERR "FPR Parity Error\n");
+	if (mcsr & PPC47x_MCSR_IPR)
+		printk(KERN_ERR "Machine Check exception is imprecise\n");
+
+	/* Clear MCSR */
+	mtspr(SPRN_MCSR, mcsr);
+
+	return 0;
+}
 #elif defined(CONFIG_E500)
 int machine_check_e500(struct pt_regs *regs)
 {
@@ -815,12 +855,15 @@
 		return;
 	}
 	if (reason & REASON_TRAP) {
+		/* Debugger is first in line to stop recursive faults in
+		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
+		if (debugger_bpt(regs))
+			return;
+
 		/* trap exception */
 		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
 				== NOTIFY_STOP)
 			return;
-		if (debugger_bpt(regs))
-			return;
 
 		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
 		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 8223717..9ce7b62 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -645,8 +645,10 @@
 			found = 1;
 			break;
 		}
-	if (!found)
+	if (!found) {
+		spin_unlock_irqrestore(&vio_cmo.lock, flags);
 		return;
+	}
 
 	/* Increase/decrease in desired device entitlement */
 	if (desired >= viodev->cmo.desired) {
@@ -958,9 +960,12 @@
 
 static ssize_t name_show(struct device *, struct device_attribute *, char *);
 static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+			     char *buf);
 static struct device_attribute vio_cmo_dev_attrs[] = {
 	__ATTR_RO(name),
 	__ATTR_RO(devspec),
+	__ATTR_RO(modalias),
 	__ATTR(cmo_desired,       S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
 	       viodev_cmo_desired_show, viodev_cmo_desired_set),
 	__ATTR(cmo_entitled,      S_IRUGO, viodev_cmo_entitled_show,      NULL),
@@ -1320,9 +1325,27 @@
 	return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
 }
 
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	const struct vio_dev *vio_dev = to_vio_dev(dev);
+	struct device_node *dn;
+	const char *cp;
+
+	dn = dev->archdata.of_node;
+	if (!dn)
+		return -ENODEV;
+	cp = of_get_property(dn, "compatible", NULL);
+	if (!cp)
+		return -ENODEV;
+
+	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
+}
+
 static struct device_attribute vio_dev_attrs[] = {
 	__ATTR_RO(name),
 	__ATTR_RO(devspec),
+	__ATTR_RO(modalias),
 	__ATTR_NULL
 };
 
@@ -1365,6 +1388,7 @@
 	.match = vio_bus_match,
 	.probe = vio_bus_probe,
 	.remove = vio_bus_remove,
+	.pm = GENERIC_SUBSYS_PM_OPS,
 };
 
 /**
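
With the modalias attribute exposed, userspace can autoload vio drivers the same way it does for PCI or USB: read the "vio:T<type>S<compatible>" string and hand it to modprobe. A small userspace sketch of that flow, roughly what udev does; the sysfs device path below is an illustrative example, not a fixed name:

/* Userspace sketch; the device path is illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char alias[256], cmd[320];
	FILE *f = fopen("/sys/bus/vio/devices/30000003/modalias", "r");

	if (!f)
		return 1;
	if (!fgets(alias, sizeof(alias), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);
	alias[strcspn(alias, "\n")] = '\0';	/* "vio:T<type>S<compatible>" */
	snprintf(cmd, sizeof(cmd), "modprobe %s", alias);
	return system(cmd);
}
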
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index 64e2e49..455881a 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -28,7 +28,7 @@
 /* This clears out any unused part of the destination buffer,
    just as the libc version does.  -- paulus */
 _GLOBAL(strncpy)
-	cmpwi	0,r5,0
+	PPC_LCMPI 0,r5,0
 	beqlr
 	mtctr	r5
 	addi	r6,r3,-1
@@ -39,7 +39,7 @@
 	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
 	bnelr			/* if we didn't hit a null char, we're done */
 	mfctr	r5
-	cmpwi	0,r5,0		/* any space left in destination buffer? */
+	PPC_LCMPI 0,r5,0	/* any space left in destination buffer? */
 	beqlr			/* we know r0 == 0 here */
 2:	stbu	r0,1(r6)	/* clear it out if so */
 	bdnz	2b
@@ -70,8 +70,8 @@
 	blr
 
 _GLOBAL(strncmp)
-	PPC_LCMPI r5,0
-	beqlr
+	PPC_LCMPI 0,r5,0
+	beq-	2f
 	mtctr	r5
 	addi	r5,r3,-1
 	addi	r4,r4,-1
@@ -82,6 +82,8 @@
 	beqlr	1
 	bdnzt	eq,1b
 	blr
+2:	li	r3,0
+	blr
 
 _GLOBAL(strlen)
 	addi	r4,r3,-1
@@ -92,8 +94,8 @@
 	blr
 
 _GLOBAL(memcmp)
-	cmpwi	0,r5,0
-	ble-	2f
+	PPC_LCMPI 0,r5,0
+	beq-	2f
 	mtctr	r5
 	addi	r6,r3,-1
 	addi	r4,r4,-1
@@ -106,8 +108,8 @@
 	blr
 
 _GLOBAL(memchr)
-	cmpwi	0,r5,0
-	ble-	2f
+	PPC_LCMPI 0,r5,0
+	beq-	2f
 	mtctr	r5
 	addi	r3,r3,-1
 1:	lbzu	r0,1(r3)
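
The string.S changes above do two things: the counts are now compared with PPC_LCMPI so 64-bit callers get a full-width compare, and strncmp/memcmp/memchr take an early exit when the length is zero instead of running the loop (or, with the old "ble-" form, treating the count as signed). The expected zero-length behaviour is just standard C library semantics; a tiny reference check, runnable in userspace, not new code from this patch:

/* Reference semantics the new early exits implement (standard C):
 * a zero length means "equal" for the compares and "not found" for memchr. */
#include <assert.h>
#include <stddef.h>
#include <string.h>

int main(void)
{
	char a[] = "abc", b[] = "xyz";

	assert(strncmp(a, b, 0) == 0);
	assert(memcmp(a, b, 0) == 0);
	assert(memchr(a, 'a', 0) == NULL);
	return 0;
}
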
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 3986264..d8c6efb 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -38,7 +38,9 @@
 unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
 int icache_44x_need_flush;
 
-static void __init ppc44x_update_tlb_hwater(void)
+unsigned long tlb_47x_boltmap[1024/8];
+
+static void __cpuinit ppc44x_update_tlb_hwater(void)
 {
 	extern unsigned int tlb_44x_patch_hwater_D[];
 	extern unsigned int tlb_44x_patch_hwater_I[];
@@ -59,7 +61,7 @@
 }
 
 /*
- * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
+ * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 44x type MMU
  */
 static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
 {
@@ -67,12 +69,18 @@
 
 	ppc44x_update_tlb_hwater();
 
+	mtspr(SPRN_MMUCR, 0);
+
 	__asm__ __volatile__(
 		"tlbwe	%2,%3,%4\n"
 		"tlbwe	%1,%3,%5\n"
 		"tlbwe	%0,%3,%6\n"
 	:
+#ifdef CONFIG_PPC47x
+	: "r" (PPC47x_TLB2_S_RWX),
+#else
 	: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
+#endif
 	  "r" (phys),
 	  "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
 	  "r" (entry),
@@ -81,8 +89,93 @@
 	  "i" (PPC44x_TLB_ATTRIB));
 }
 
+static int __init ppc47x_find_free_bolted(void)
+{
+	unsigned int mmube0 = mfspr(SPRN_MMUBE0);
+	unsigned int mmube1 = mfspr(SPRN_MMUBE1);
+
+	if (!(mmube0 & MMUBE0_VBE0))
+		return 0;
+	if (!(mmube0 & MMUBE0_VBE1))
+		return 1;
+	if (!(mmube0 & MMUBE0_VBE2))
+		return 2;
+	if (!(mmube1 & MMUBE1_VBE3))
+		return 3;
+	if (!(mmube1 & MMUBE1_VBE4))
+		return 4;
+	if (!(mmube1 & MMUBE1_VBE5))
+		return 5;
+	return -1;
+}
+
+static void __init ppc47x_update_boltmap(void)
+{
+	unsigned int mmube0 = mfspr(SPRN_MMUBE0);
+	unsigned int mmube1 = mfspr(SPRN_MMUBE1);
+
+	if (mmube0 & MMUBE0_VBE0)
+		__set_bit((mmube0 >> MMUBE0_IBE0_SHIFT) & 0xff,
+			  tlb_47x_boltmap);
+	if (mmube0 & MMUBE0_VBE1)
+		__set_bit((mmube0 >> MMUBE0_IBE1_SHIFT) & 0xff,
+			  tlb_47x_boltmap);
+	if (mmube0 & MMUBE0_VBE2)
+		__set_bit((mmube0 >> MMUBE0_IBE2_SHIFT) & 0xff,
+			  tlb_47x_boltmap);
+	if (mmube1 & MMUBE1_VBE3)
+		__set_bit((mmube1 >> MMUBE1_IBE3_SHIFT) & 0xff,
+			  tlb_47x_boltmap);
+	if (mmube1 & MMUBE1_VBE4)
+		__set_bit((mmube1 >> MMUBE1_IBE4_SHIFT) & 0xff,
+			  tlb_47x_boltmap);
+	if (mmube1 & MMUBE1_VBE5)
+		__set_bit((mmube1 >> MMUBE1_IBE5_SHIFT) & 0xff,
+			  tlb_47x_boltmap);
+}
+
+/*
+ * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
+ */
+static void __cpuinit ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
+{
+	unsigned int rA;
+	int bolted;
+
+	/* Base rA is HW way select, way 0, bolted bit set */
+	rA = 0x88000000;
+
+	/* Look for a bolted entry slot */
+	bolted = ppc47x_find_free_bolted();
+	BUG_ON(bolted < 0);
+
+	/* Insert bolted slot number */
+	rA |= bolted << 24;
+
+	pr_debug("256M TLB entry for 0x%08x->0x%08x in bolt slot %d\n",
+		 virt, phys, bolted);
+
+	mtspr(SPRN_MMUCR, 0);
+
+	__asm__ __volatile__(
+		"tlbwe	%2,%3,0\n"
+		"tlbwe	%1,%3,1\n"
+		"tlbwe	%0,%3,2\n"
+		:
+		: "r" (PPC47x_TLB2_SW | PPC47x_TLB2_SR |
+		       PPC47x_TLB2_SX
+#ifdef CONFIG_SMP
+		       | PPC47x_TLB2_M
+#endif
+		       ),
+		  "r" (phys),
+		  "r" (virt | PPC47x_TLB0_VALID | PPC47x_TLB0_256M),
+		  "r" (rA));
+}
+
 void __init MMU_init_hw(void)
 {
+	/* This is not useful on 47x but won't hurt either */
 	ppc44x_update_tlb_hwater();
 
 	flush_instruction_cache();
@@ -95,8 +188,51 @@
 	/* Pin in enough TLBs to cover any lowmem not covered by the
 	 * initial 256M mapping established in head_44x.S */
 	for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
-	     addr += PPC_PIN_SIZE)
-		ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+	     addr += PPC_PIN_SIZE) {
+		if (mmu_has_feature(MMU_FTR_TYPE_47x))
+			ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
+		else
+			ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+	}
+	if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
+		ppc47x_update_boltmap();
 
+#ifdef DEBUG
+		{
+			int i;
+
+			printk(KERN_DEBUG "bolted entries: ");
+			for (i = 0; i < 256; i++) {
+				if (test_bit(i, tlb_47x_boltmap))
+					printk("%d ", i);
+			}
+			printk("\n");
+		}
+#endif /* DEBUG */
+	}
 	return total_lowmem;
 }
+
+#ifdef CONFIG_SMP
+void __cpuinit mmu_init_secondary(int cpu)
+{
+	unsigned long addr;
+
+	/* Pin in enough TLBs to cover any lowmem not covered by the
+	 * initial 256M mapping established in head_44x.S
+	 *
+	 * WARNING: This is called with only the first 256M of the
+	 * linear mapping in the TLB and we can't take faults yet
+	 * so beware of what this code uses. It runs off a temporary
+	 * stack. current (r2) isn't initialized, smp_processor_id()
+	 * will not work, current thread info isn't accessible, ...
+	 */
+	for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
+	     addr += PPC_PIN_SIZE) {
+		if (mmu_has_feature(MMU_FTR_TYPE_47x))
+			ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
+		else
+			ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+	}
+}
+#endif /* CONFIG_SMP */
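
tlb_47x_boltmap records, one bit per index, which bolted-entry slots hold pinned kernel translations; the 47x full-invalidate path added to tlb_nohash_low.S later in this merge consults it so a TLB sweep leaves the pinned lowmem mapping in place (bolted entries live in way 0 of their set). A C-level sketch of that consultation; the real sweep is the assembly loop, and invalidate_tlb_entry() is a hypothetical stand-in for the tlbre/tlbwe sequence:

/* C-level sketch only; invalidate_tlb_entry() is hypothetical. */
#include <linux/bitops.h>

extern unsigned long tlb_47x_boltmap[];		/* defined above in 44x_mmu.c */
static void invalidate_tlb_entry(int set, int way);	/* hypothetical helper */

static void sweep_all_tlb_entries(void)
{
	int set, way;

	for (set = 0; set < 256; set++)
		for (way = 0; way < 4; way++) {
			if (way == 0 && test_bit(set, tlb_47x_boltmap))
				continue;	/* bolted: keep the pinned entry */
			invalidate_tlb_entry(set, way);
		}
}
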
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 26fb6b9..1bd712c 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -151,13 +151,14 @@
 	if (!user_mode(regs) && (address >= TASK_SIZE))
 		return SIGSEGV;
 
-#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
+			     defined(CONFIG_PPC_BOOK3S_64))
   	if (error_code & DSISR_DABRMATCH) {
 		/* DABR match */
 		do_dabr(regs, address, error_code);
 		return 0;
 	}
-#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
+#endif
 
 	if (in_atomic() || mm == NULL) {
 		if (!user_mode(regs))
@@ -307,7 +308,6 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
- survive:
 	ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(ret & VM_FAULT_ERROR)) {
 		if (ret & VM_FAULT_OOM)
@@ -359,15 +359,10 @@
  */
 out_of_memory:
 	up_read(&mm->mmap_sem);
-	if (is_global_init(current)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk("VM: killing process %s\n", current->comm);
-	if (user_mode(regs))
-		do_group_exit(SIGKILL);
-	return SIGKILL;
+	if (!user_mode(regs))
+		return SIGKILL;
+	pagefault_out_of_memory();
+	return 0;
 
 do_sigbus:
 	up_read(&mm->mmap_sem);
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 1ed6b52..cdc7526 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -2,7 +2,7 @@
  * Modifications by Kumar Gala (galak@kernel.crashing.org) to support
  * E500 Book E processors.
  *
- * Copyright 2004 Freescale Semiconductor, Inc
+ * Copyright 2004,2010 Freescale Semiconductor, Inc.
  *
  * This file contains the routines for initializing the MMU
  * on the 4xx series of chips.
@@ -56,19 +56,13 @@
 
 unsigned int tlbcam_index;
 
-#define NUM_TLBCAMS	(64)
 
 #if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
 #error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
 #endif
 
-struct tlbcam {
-	u32	MAS0;
-	u32	MAS1;
-	unsigned long	MAS2;
-	u32	MAS3;
-	u32	MAS7;
-} TLBCAM[NUM_TLBCAMS];
+#define NUM_TLBCAMS	(64)
+struct tlbcam TLBCAM[NUM_TLBCAMS];
 
 struct tlbcamrange {
 	unsigned long start;
@@ -109,19 +103,6 @@
 	return 0;
 }
 
-void loadcam_entry(int idx)
-{
-	mtspr(SPRN_MAS0, TLBCAM[idx].MAS0);
-	mtspr(SPRN_MAS1, TLBCAM[idx].MAS1);
-	mtspr(SPRN_MAS2, TLBCAM[idx].MAS2);
-	mtspr(SPRN_MAS3, TLBCAM[idx].MAS3);
-
-	if (mmu_has_feature(MMU_FTR_BIG_PHYS))
-		mtspr(SPRN_MAS7, TLBCAM[idx].MAS7);
-
-	asm volatile("isync;tlbwe;isync" : : : "memory");
-}
-
 /*
  * Set up one of the I/D BAT (block address translation) register pairs.
  * The parameters are not checked; in particular size must be a power
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d7fa50b..e267f22 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -252,6 +252,47 @@
 }
 #endif /* CONFIG_PPC_BOOK3E */
 
+struct vmemmap_backing *vmemmap_list;
+
+static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
+{
+	static struct vmemmap_backing *next;
+	static int num_left;
+
+	/* allocate a page when required and hand out chunks */
+	if (!next || !num_left) {
+		next = vmemmap_alloc_block(PAGE_SIZE, node);
+		if (unlikely(!next)) {
+			WARN_ON(1);
+			return NULL;
+		}
+		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
+	}
+
+	num_left--;
+
+	return next++;
+}
+
+static __meminit void vmemmap_list_populate(unsigned long phys,
+					    unsigned long start,
+					    int node)
+{
+	struct vmemmap_backing *vmem_back;
+
+	vmem_back = vmemmap_list_alloc(node);
+	if (unlikely(!vmem_back)) {
+		WARN_ON(1);
+		return;
+	}
+
+	vmem_back->phys = phys;
+	vmem_back->virt_addr = start;
+	vmem_back->list = vmemmap_list;
+
+	vmemmap_list = vmem_back;
+}
+
 int __meminit vmemmap_populate(struct page *start_page,
 			       unsigned long nr_pages, int node)
 {
@@ -276,6 +317,8 @@
 		if (!p)
 			return -ENOMEM;
 
+		vmemmap_list_populate(__pa(p), start, node);
+
 		pr_debug("      * %016lx..%016lx allocated at %p\n",
 			 start, start + page_size, p);
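
vmemmap_list now records every backing block handed out for the virtual memmap: its physical base, the virtual address it maps, and a link to the previous record. Nothing in this hunk walks the list yet; a hedged sketch of the kind of lookup it enables, where the helper below is illustrative and not part of this patch:

/* Hypothetical walk of vmemmap_list: find the backing block covering a
 * given vmemmap virtual address.  block_size is the allocation unit used
 * by vmemmap_populate() (the vmemmap page size). */
static struct vmemmap_backing *vmemmap_find(unsigned long vaddr,
					    unsigned long block_size)
{
	struct vmemmap_backing *vb;

	for (vb = vmemmap_list; vb; vb = vb->list)
		if (vaddr >= vb->virt_addr && vaddr < vb->virt_addr + block_size)
			return vb;
	return NULL;
}
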
 
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 1f2d9ff..ddfd7ad 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -395,10 +395,18 @@
 	 * the PID/TID comparison is disabled, so we can use a TID of zero
 	 * to represent all kernel pages as shared among all contexts.
 	 * 	-- Dan
+	 *
+	 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
+	 * should normally never have to steal, though the facility is
+	 * present if needed.
+	 *      -- BenH
 	 */
 	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
 		first_context = 0;
 		last_context = 15;
+	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
+		first_context = 1;
+		last_context = 65535;
 	} else {
 		first_context = 1;
 		last_context = 255;
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index d49a775..63b84a0 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -69,12 +69,7 @@
 }
 #endif /* CONIFG_8xx */
 
-/*
- * As of today, we don't support tlbivax broadcast on any
- * implementation. When that becomes the case, this will be
- * an extern.
- */
-#ifdef CONFIG_PPC_BOOK3E
+#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x)
 extern void _tlbivax_bcast(unsigned long address, unsigned int pid,
 			   unsigned int tsize, unsigned int ind);
 #else
@@ -149,7 +144,15 @@
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(unsigned long top);
 extern void adjust_total_lowmem(void);
+extern void loadcam_entry(unsigned int index);
 
+struct tlbcam {
+	u32	MAS0;
+	u32	MAS1;
+	unsigned long	MAS2;
+	u32	MAS3;
+	u32	MAS7;
+};
 #elif defined(CONFIG_PPC32)
 /* anything 32-bit except 4xx or 8xx */
 extern void MMU_init_hw(void);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index eaa7633..80d11063 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -33,16 +33,41 @@
 #define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
 
 int numa_cpu_lookup_table[NR_CPUS];
-cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
+cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
 struct pglist_data *node_data[MAX_NUMNODES];
 
 EXPORT_SYMBOL(numa_cpu_lookup_table);
-EXPORT_SYMBOL(numa_cpumask_lookup_table);
+EXPORT_SYMBOL(node_to_cpumask_map);
 EXPORT_SYMBOL(node_data);
 
 static int min_common_depth;
 static int n_mem_addr_cells, n_mem_size_cells;
 
+/*
+ * Allocate node_to_cpumask_map based on number of available nodes
+ * Requires node_possible_map to be valid.
+ *
+ * Note: node_to_cpumask() is not valid until after this is done.
+ */
+static void __init setup_node_to_cpumask_map(void)
+{
+	unsigned int node, num = 0;
+
+	/* setup nr_node_ids if not done yet */
+	if (nr_node_ids == MAX_NUMNODES) {
+		for_each_node_mask(node, node_possible_map)
+			num = node;
+		nr_node_ids = num + 1;
+	}
+
+	/* allocate the map */
+	for (node = 0; node < nr_node_ids; node++)
+		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
+
+	/* cpumask_of_node() will now work */
+	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
+}
+
 static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
 						unsigned int *nid)
 {
@@ -138,8 +163,8 @@
 
 	dbg("adding cpu %d to node %d\n", cpu, node);
 
-	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
-		cpu_set(cpu, numa_cpumask_lookup_table[node]);
+	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
+		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -149,8 +174,8 @@
 
 	dbg("removing cpu %lu from node %d\n", cpu, node);
 
-	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
-		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
+	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
+		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
 	} else {
 		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
 		       cpu, node);
@@ -246,7 +271,8 @@
 	const unsigned int *ref_points;
 	struct device_node *rtas_root;
 	unsigned int len;
-	struct device_node *options;
+	struct device_node *chosen;
+	const char *vec5;
 
 	rtas_root = of_find_node_by_path("/rtas");
 
@@ -264,14 +290,17 @@
 			"ibm,associativity-reference-points", &len);
 
 	/*
-	 * For type 1 affinity information we want the first field
+	 * For form 1 affinity information we want the first field
 	 */
-	options = of_find_node_by_path("/options");
-	if (options) {
-		const char *str;
-		str = of_get_property(options, "ibm,associativity-form", NULL);
-		if (str && !strcmp(str, "1"))
-                        index = 0;
+#define VEC5_AFFINITY_BYTE	5
+#define VEC5_AFFINITY		0x80
+	chosen = of_find_node_by_path("/chosen");
+	if (chosen) {
+		vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL);
+		if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) {
+			dbg("Using form 1 affinity\n");
+			index = 0;
+		}
 	}
 
 	if ((len >= 2 * sizeof(unsigned int)) && ref_points) {
@@ -750,8 +779,9 @@
 		 * If we used a CPU iterator here we would miss printing
 		 * the holes in the cpumap.
 		 */
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-			if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
+		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+			if (cpumask_test_cpu(cpu,
+					node_to_cpumask_map[node])) {
 				if (count == 0)
 					printk(" %u", cpu);
 				++count;
@@ -763,7 +793,7 @@
 		}
 
 		if (count > 1)
-			printk("-%u", NR_CPUS - 1);
+			printk("-%u", nr_cpu_ids - 1);
 		printk("\n");
 	}
 }
@@ -939,10 +969,6 @@
 	else
 		dump_numa_memory_topology();
 
-	register_cpu_notifier(&ppc64_numa_nb);
-	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
-			  (void *)(unsigned long)boot_cpuid);
-
 	for_each_online_node(nid) {
 		unsigned long start_pfn, end_pfn;
 		void *bootmem_vaddr;
@@ -996,6 +1022,16 @@
 	}
 
 	init_bootmem_done = 1;
+
+	/*
+	 * Now bootmem is initialised we can create the node to cpumask
+	 * lookup tables and setup the cpu callback to populate them.
+	 */
+	setup_node_to_cpumask_map();
+
+	register_cpu_notifier(&ppc64_numa_nb);
+	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
+			  (void *)(unsigned long)boot_cpuid);
 }
 
 void __init paging_init(void)
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index b9243e75..9fc02dc 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -146,6 +146,14 @@
 	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
 	flags &= ~(_PAGE_USER | _PAGE_EXEC);
 
+#ifdef _PAGE_BAP_SR
+	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
+	 * which means that we just cleared supervisor access... oops ;-) This
+	 * restores it
+	 */
+	flags |= _PAGE_BAP_SR;
+#endif
+
 	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_flags);
@@ -385,11 +393,7 @@
 		return -EINVAL;
 	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
 	wmb();
-#ifdef CONFIG_PPC_STD_MMU
-	flush_hash_pages(0, address, pmd_val(*kpmd), 1);
-#else
 	flush_tlb_page(NULL, address);
-#endif
 	pte_unmap(kpte);
 
 	return 0;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index d95679a..d050fc8 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -265,6 +265,14 @@
 	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
 	flags &= ~(_PAGE_USER | _PAGE_EXEC);
 
+#ifdef _PAGE_BAP_SR
+	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
+	 * which means that we just cleared supervisor access... oops ;-) This
+	 * restores it
+	 */
+	flags |= _PAGE_BAP_SR;
+#endif
+
 	if (ppc_md.ioremap)
 		return ppc_md.ioremap(addr, size, flags, caller);
 	return __ioremap_caller(addr, size, flags, caller);
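
Both ioremap_flags() fixes address the same trap: on Book3E parts using the new PTE format, _PAGE_USER is a composite that shares a bit with supervisor read permission, so masking _PAGE_USER off also drops _PAGE_BAP_SR and the kernel loses read access to its own mapping. A sketch of the relationship the comments describe; the bit values are illustrative, the real definitions live in asm/pte-book3e.h:

/* Illustrative values only -- see asm/pte-book3e.h for the real layout. */
#define _PAGE_BAP_SR	0x004			/* supervisor read */
#define _PAGE_BAP_UR	0x008			/* user read */
#define _PAGE_USER	(_PAGE_BAP_UR | _PAGE_BAP_SR)

static unsigned long sanitize_ioremap_flags(unsigned long flags)
{
	flags &= ~_PAGE_USER;	/* strips supervisor read as a side effect... */
	flags |= _PAGE_BAP_SR;	/* ...so put it back, as both hunks do */
	return flags;
}
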
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index bbdc5b5..cfa7682 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -10,7 +10,7 @@
  *	- tlbil_va
  *	- tlbil_pid
  *	- tlbil_all
- *	- tlbivax_bcast (not yet)
+ *	- tlbivax_bcast
  *
  * Code mostly moved over from misc_32.S
  *
@@ -33,6 +33,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor.h>
+#include <asm/bug.h>
 
 #if defined(CONFIG_40x)
 
@@ -65,7 +66,7 @@
  * Nothing to do for 8xx, everything is inline
  */
 
-#elif defined(CONFIG_44x)
+#elif defined(CONFIG_44x) /* Includes 47x */
 
 /*
  * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
@@ -73,7 +74,13 @@
  */
 _GLOBAL(__tlbil_va)
 	mfspr	r5,SPRN_MMUCR
-	rlwimi	r5,r4,0,24,31			/* Set TID */
+	mfmsr   r10
+
+	/*
+	 * We write 16 bits of STID since 47x supports that much; we
+	 * will never be passed out-of-bounds values on 440 (hopefully).
+	 */
+	rlwimi  r5,r4,0,16,31
 
 	/* We have to run the search with interrupts disabled, otherwise
 	 * an interrupt which causes a TLB miss can clobber the MMUCR
@@ -83,24 +90,41 @@
 	 * and restoring MMUCR, so only normal interrupts have to be
 	 * taken care of.
 	 */
-	mfmsr	r4
 	wrteei	0
 	mtspr	SPRN_MMUCR,r5
-	tlbsx.	r3, 0, r3
-	wrtee	r4
-	bne	1f
+	tlbsx.	r6,0,r3
+	bne	10f
 	sync
-	/* There are only 64 TLB entries, so r3 < 64,
-	 * which means bit 22, is clear.  Since 22 is
-	 * the V bit in the TLB_PAGEID, loading this
+BEGIN_MMU_FTR_SECTION
+	b	2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
+	/* On 440 There are only 64 TLB entries, so r3 < 64, which means bit
+	 * 22, is clear.  Since 22 is the V bit in the TLB_PAGEID, loading this
 	 * value will invalidate the TLB entry.
 	 */
-	tlbwe	r3, r3, PPC44x_TLB_PAGEID
+	tlbwe	r6,r6,PPC44x_TLB_PAGEID
 	isync
-1:	blr
+10:	wrtee	r10
+	blr
+2:
+#ifdef CONFIG_PPC_47x
+	oris	r7,r6,0x8000	/* specify way explicitly */
+	clrrwi	r4,r3,12	/* get an EPN for the hashing with V = 0 */
+	ori	r4,r4,PPC47x_TLBE_SIZE
+	tlbwe   r4,r7,0		/* write it */
+	isync
+	wrtee	r10
+	blr
+#else /* CONFIG_PPC_47x */
+1:	trap
+	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
+#endif /* !CONFIG_PPC_47x */
 
 _GLOBAL(_tlbil_all)
 _GLOBAL(_tlbil_pid)
+BEGIN_MMU_FTR_SECTION
+	b	2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
 	li	r3,0
 	sync
 
@@ -115,6 +139,76 @@
 
 	isync
 	blr
+2:
+#ifdef CONFIG_PPC_47x
+	/* 476 variant. There's no simple way to do this, hopefully we'll
+	 * try to limit the amount of such full invalidates
+	 */
+	mfmsr	r11		/* Interrupts off */
+	wrteei	0
+	li	r3,-1		/* Current set */
+	lis	r10,tlb_47x_boltmap@h
+	ori	r10,r10,tlb_47x_boltmap@l
+	lis	r7,0x8000	/* Specify way explicitly */
+
+	b	9f		/* For each set */
+
+1:	li	r9,4		/* Number of ways */
+	li	r4,0		/* Current way */
+	li	r6,0		/* Default entry value 0 */
+	andi.	r0,r8,1		/* Check if way 0 is bolted */
+	mtctr	r9		/* Load way counter */
+	bne-	3f		/* Bolted, skip loading it */
+
+2:	/* For each way */
+	or	r5,r3,r4	/* Make way|index for tlbre */
+	rlwimi	r5,r5,16,8,15	/* Copy index into position */
+	tlbre	r6,r5,0		/* Read entry */
+3:	addis	r4,r4,0x2000	/* Next way */
+	andi.	r0,r6,PPC47x_TLB0_VALID /* Valid entry ? */
+	beq	4f		/* Nope, skip it */
+	rlwimi	r7,r5,0,1,2	/* Insert way number */
+	rlwinm	r6,r6,0,21,19	/* Clear V */
+	tlbwe   r6,r7,0		/* Write it */
+4:	bdnz	2b		/* Loop for each way */
+	srwi	r8,r8,1		/* Next boltmap bit */
+9:	cmpwi	cr1,r3,255	/* Last set done ? */
+	addi	r3,r3,1		/* Next set */
+	beq	cr1,1f		/* End of loop */
+	andi.	r0,r3,0x1f	/* Need to load a new boltmap word ? */
+	bne	1b		/* No, loop */
+	lwz	r8,0(r10)	/* Load boltmap entry */
+	addi	r10,r10,4	/* Next word */
+	b	1b		/* Then loop */
+1:	isync			/* Sync shadows */
+	wrtee	r11
+#else /* CONFIG_PPC_47x */
+1:	trap
+	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
+#endif /* !CONFIG_PPC_47x */
+	blr
+
+#ifdef CONFIG_PPC_47x
+/*
+ * _tlbivax_bcast is only on 47x. We don't bother doing a runtime
+ * check though, it will blow up soon enough if we mistakenly try
+ * to use it on a 440.
+ */
+_GLOBAL(_tlbivax_bcast)
+	mfspr	r5,SPRN_MMUCR
+	mfmsr	r10
+	rlwimi	r5,r4,0,16,31
+	wrteei	0
+	mtspr	SPRN_MMUCR,r5
+/*	tlbivax	0,r3 - use .long to avoid binutils deps */
+	.long 0x7c000624 | (r3 << 11)
+	isync
+	eieio
+	tlbsync
+	sync
+	wrtee	r10
+	blr
+#endif /* CONFIG_PPC_47x */
 
 #elif defined(CONFIG_FSL_BOOKE)
 /*
@@ -271,3 +365,31 @@
 #else
 #error Unsupported processor type !
 #endif
+
+#if defined(CONFIG_FSL_BOOKE)
+/*
+ * extern void loadcam_entry(unsigned int index)
+ *
+ * Load TLBCAM[index] entry in to the L2 CAM MMU
+ */
+_GLOBAL(loadcam_entry)
+	LOAD_REG_ADDR(r4, TLBCAM)
+	mulli	r5,r3,TLBCAM_SIZE
+	add	r3,r5,r4
+	lwz	r4,TLBCAM_MAS0(r3)
+	mtspr	SPRN_MAS0,r4
+	lwz	r4,TLBCAM_MAS1(r3)
+	mtspr	SPRN_MAS1,r4
+	PPC_LL	r4,TLBCAM_MAS2(r3)
+	mtspr	SPRN_MAS2,r4
+	lwz	r4,TLBCAM_MAS3(r3)
+	mtspr	SPRN_MAS3,r4
+BEGIN_MMU_FTR_SECTION
+	lwz	r4,TLBCAM_MAS7(r3)
+	mtspr	SPRN_MAS7,r4
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
+	isync
+	tlbwe
+	isync
+	blr
+#endif
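
The new loadcam_entry indexes struct tlbcam from assembly, so the TLBCAM_* symbols above must exist as byte offsets generated at build time. A sketch of the asm-offsets.c entries this relies on; they are assumed to be added elsewhere in this series (inside asm-offsets.c's main()), with names matching the symbols used above:

/* Assumed asm-offsets.c additions (sketch, FSL_BOOKE only): */
DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2));
DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
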
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 7486bff..eeba0a7 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -1,3 +1,12 @@
+config PPC_47x
+	bool "Support for 47x variant"
+	depends on 44x
+	default n
+	select MPIC
+	help
+	  This option enables support for the 47x family of processors and is
+	  not currently compatible with other 44x or 46x variants.
+
 config BAMBOO
 	bool "Bamboo"
 	depends on 44x
@@ -151,6 +160,17 @@
 	help
 	  This option enables support for the AMCC PPC440EP evaluation board.
 
+config ISS4xx
+	bool "ISS 4xx Simulator"
+	depends on (44x || 40x)
+	default n
+	select 405GP if 40x
+	select 440GP if 44x && !PPC_47x
+	select PPC_FPU
+	select OF_RTC
+	help
+	  This option enables support for the IBM ISS simulation environment.
+
 #config LUAN
 #	bool "Luan"
 #	depends on 44x
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index ee6185a..82ff326 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -5,3 +5,4 @@
 obj-$(CONFIG_WARP)	+= warp.o
 obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o
 obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o
+obj-$(CONFIG_ISS4xx)	+= iss4xx.o
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
new file mode 100644
index 0000000..aa46e9d
--- /dev/null
+++ b/arch/powerpc/platforms/44x/iss4xx.c
@@ -0,0 +1,167 @@
+/*
+ * PPC476 board specific routines
+ *
+ * Copyright 2010 Torez Smith, IBM Corporation.
+ *
+ * Based on earlier code:
+ *    Matt Porter <mporter@kernel.crashing.org>
+ *    Copyright 2002-2005 MontaVista Software Inc.
+ *
+ *    Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *    Copyright (c) 2003-2005 Zultys Technologies
+ *
+ *    Rewritten and ported to the merged powerpc tree:
+ *    Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/rtc.h>
+
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/time.h>
+#include <asm/uic.h>
+#include <asm/ppc4xx.h>
+#include <asm/mpic.h>
+#include <asm/mmu.h>
+
+static __initdata struct of_device_id iss4xx_of_bus[] = {
+	{ .compatible = "ibm,plb4", },
+	{ .compatible = "ibm,plb6", },
+	{ .compatible = "ibm,opb", },
+	{ .compatible = "ibm,ebc", },
+	{},
+};
+
+static int __init iss4xx_device_probe(void)
+{
+	of_platform_bus_probe(NULL, iss4xx_of_bus, NULL);
+	of_instantiate_rtc();
+
+	return 0;
+}
+machine_device_initcall(iss4xx, iss4xx_device_probe);
+
+/* We can have either UICs or MPICs */
+static void __init iss4xx_init_irq(void)
+{
+	struct device_node *np;
+
+	/* Find top level interrupt controller */
+	for_each_node_with_property(np, "interrupt-controller") {
+		if (of_get_property(np, "interrupts", NULL) == NULL)
+			break;
+	}
+	if (np == NULL)
+		panic("Can't find top level interrupt controller");
+
+	/* Check type and do appropriate initialization */
+	if (of_device_is_compatible(np, "ibm,uic")) {
+		uic_init_tree();
+		ppc_md.get_irq = uic_get_irq;
+#ifdef CONFIG_MPIC
+	} else if (of_device_is_compatible(np, "chrp,open-pic")) {
+		/* The MPIC driver will get everything it needs from the
+		 * device-tree, just pass 0 to all arguments
+		 */
+		struct mpic *mpic = mpic_alloc(np, 0, MPIC_PRIMARY, 0, 0,
+					       " MPIC     ");
+		BUG_ON(mpic == NULL);
+		mpic_init(mpic);
+		ppc_md.get_irq = mpic_get_irq;
+#endif
+	} else
+		panic("Unrecognized top level interrupt controller");
+}
+
+#ifdef CONFIG_SMP
+static void __cpuinit smp_iss4xx_setup_cpu(int cpu)
+{
+	mpic_setup_this_cpu();
+}
+
+static void __cpuinit smp_iss4xx_kick_cpu(int cpu)
+{
+	struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
+	const u64 *spin_table_addr_prop;
+	u32 *spin_table;
+	extern void start_secondary_47x(void);
+
+	BUG_ON(cpunode == NULL);
+
+	/* Assume spin table. We could test for the enable-method in
+	 * the device-tree but currently there's little point as it's
+	 * our only supported method
+	 */
+	spin_table_addr_prop = of_get_property(cpunode, "cpu-release-addr",
+					       NULL);
+	if (spin_table_addr_prop == NULL) {
+		pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu);
+		return;
+	}
+
+	/* Assume it's mapped as part of the linear mapping. This is a bit
+	 * fishy but will work fine for now
+	 */
+	spin_table = (u32 *)__va(*spin_table_addr_prop);
+	pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);
+
+	spin_table[3] = cpu;
+	smp_wmb();
+	spin_table[1] = __pa(start_secondary_47x);
+	mb();
+}
+
+static struct smp_ops_t iss_smp_ops = {
+	.probe		= smp_mpic_probe,
+	.message_pass	= smp_mpic_message_pass,
+	.setup_cpu	= smp_iss4xx_setup_cpu,
+	.kick_cpu	= smp_iss4xx_kick_cpu,
+	.give_timebase	= smp_generic_give_timebase,
+	.take_timebase	= smp_generic_take_timebase,
+};
+
+static void __init iss4xx_smp_init(void)
+{
+	if (mmu_has_feature(MMU_FTR_TYPE_47x))
+		smp_ops = &iss_smp_ops;
+}
+
+#else /* CONFIG_SMP */
+static void __init iss4xx_smp_init(void) { }
+#endif /* CONFIG_SMP */
+
+static void __init iss4xx_setup_arch(void)
+{
+	iss4xx_smp_init();
+}
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init iss4xx_probe(void)
+{
+	unsigned long root = of_get_flat_dt_root();
+
+	if (!of_flat_dt_is_compatible(root, "ibm,iss-4xx"))
+		return 0;
+
+	return 1;
+}
+
+define_machine(iss4xx) {
+	.name			= "ISS-4xx",
+	.probe			= iss4xx_probe,
+	.progress		= udbg_progress,
+	.init_IRQ		= iss4xx_init_irq,
+	.setup_arch		= iss4xx_setup_arch,
+	.restart		= ppc4xx_reset_system,
+	.calibrate_decr		= generic_calibrate_decr,
+};
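
smp_iss4xx_kick_cpu() above implements the boot-CPU half of a spin-table release: the secondary's logical id is written to slot 3, a write barrier is issued, then the physical entry point goes into slot 1. The waiting side lives in firmware / low-level assembly, not in this file; a C sketch of the protocol it is assumed to follow:

/* Protocol sketch only (the real spin loop is not C code).  Slot layout
 * matches the writes above: [1] = release address, [3] = logical cpu id. */
static void secondary_spin_loop(volatile unsigned int *spin_table)
{
	unsigned int cpu;
	void (*entry)(unsigned int);

	while (spin_table[1] == 0)
		;				/* wait for the boot CPU's kick */

	cpu = spin_table[3];			/* written before the release address */
	entry = (void (*)(unsigned int))(unsigned long)spin_table[1];
	entry(cpu);				/* enter start_secondary_47x */
}
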
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index 0b4f883..ae525e4 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -74,6 +74,7 @@
 static struct of_device_id __initdata of_bus_ids[] = {
 	{ .compatible = "simple-bus" },
 	{ .compatible = "gianfar" },
+	{ .compatible = "gpio-leds", },
 	{},
 };
 
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index a1908d2..e00801c 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -72,6 +72,7 @@
 	{ .compatible = "soc", },
 	{ .compatible = "simple-bus", },
 	{ .compatible = "gianfar", },
+	{ .compatible = "gpio-leds", },
 	{},
 };
 
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 5abe137..018cc67 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -83,7 +83,8 @@
 	{ .compatible = "fsl,mpc8610-immr", },
 	{ .compatible = "fsl,mpc8610-guts", },
 	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
+	/* So that the DMA channel nodes can be probed individually: */
+	{ .compatible = "fsl,eloplus-dma", },
 	{}
 };
 
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index a8aae0b..d361f81 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -43,7 +43,7 @@
 	select PPC_PCI_CHOICE
 
 config 44x
-	bool "AMCC 44x"
+	bool "AMCC 44x, 46x or 47x"
 	select PPC_DCR_NATIVE
 	select PPC_UDBG_16550
 	select 4xx_SOC
@@ -294,7 +294,7 @@
          This enables the powerpc-specific perf_event back-end.
 
 config SMP
-	depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE
+	depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x
 	bool "Symmetric multi-processing support"
 	---help---
 	  This enables support for systems with more than one CPU. If you have
@@ -322,6 +322,7 @@
 config NOT_COHERENT_CACHE
 	bool
 	depends on 4xx || 8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON
+	default n if PPC_47x
 	default y
 
 config CHECK_CACHE_COHERENCY
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index e6506cd..bfa2c0c 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -118,7 +118,7 @@
 	policy->cur = cbe_freqs[cur_pmode].frequency;
 
 #ifdef CONFIG_SMP
-	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
 	cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
index fba5bf9..32a56c6 100644
--- a/arch/powerpc/platforms/iseries/exception.S
+++ b/arch/powerpc/platforms/iseries/exception.S
@@ -252,8 +252,8 @@
 	li	r11,1
 	ld	r12,PACALPPACAPTR(r13)
 	stb	r11,LPPACADECRINT(r12)
-	LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy)
-	lwz	r12,0(r12)
+	li	r12,-1
+	clrldi	r12,r12,33	/* set DEC to 0x7fffffff */
 	mtspr	SPRN_DEC,r12
 	/* fall through */
 
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index b841c9a..3fc2e64 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/of.h>
+#include <linux/ratelimit.h>
 
 #include <asm/types.h>
 #include <asm/io.h>
@@ -584,14 +585,9 @@
 
 	orig_addr = (unsigned long __force)addr;
 	if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory)) {
-		static unsigned long last_jiffies;
-		static int num_printed;
+		static DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 10);
 
-		if (time_after(jiffies, last_jiffies + 60 * HZ)) {
-			last_jiffies = jiffies;
-			num_printed = 0;
-		}
-		if (num_printed++ < 10)
+		if (__ratelimit(&ratelimit))
 			printk(KERN_ERR
 				"iSeries_%s: invalid access at IO address %p\n",
 				func, addr);
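
The hunk above swaps the open-coded "at most 10 messages per 60 seconds" bookkeeping for the generic ratelimit helper, which keeps the interval and burst state in one struct. A minimal sketch of the same pattern in isolation:

/* Minimal sketch of the helper: allow at most 10 messages per 60*HZ
 * jiffies; __ratelimit() returns non-zero when printing is allowed. */
#include <linux/kernel.h>
#include <linux/ratelimit.h>

static void complain(const void *addr)
{
	static DEFINE_RATELIMIT_STATE(rs, 60 * HZ, 10);

	if (__ratelimit(&rs))
		printk(KERN_ERR "invalid access at %p\n", addr);
}
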
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index 722335e..6590850 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -83,7 +83,7 @@
 
 static int smp_iSeries_probe(void)
 {
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 
 static void smp_iSeries_kick_cpu(int nr)
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
index d35e052..c16537b 100644
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ b/arch/powerpc/platforms/pasemi/cpufreq.c
@@ -213,7 +213,7 @@
 	pr_debug("current astate is at %d\n",cur_astate);
 
 	policy->cur = pas_freqs[cur_astate].frequency;
-	cpumask_copy(policy->cpus, &cpu_online_map);
+	cpumask_copy(policy->cpus, cpu_online_mask);
 
 	ppc_proc_freq = policy->cur * 1000ul;
 
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
index 3ca09d3..9650c602 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -362,7 +362,7 @@
 	/* secondary CPUs are tied to the primary one by the
 	 * cpufreq core if in the secondary policy we tell it that
 	 * it actually must be one policy together with all others. */
-	cpumask_copy(policy->cpus, &cpu_online_map);
+	cpumask_copy(policy->cpus, cpu_online_mask);
 	cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
 
 	return cpufreq_frequency_table_cpuinfo(policy,
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index f45331a..06a137c 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -592,7 +592,7 @@
 	/* Probe keywest-i2c busses */
 	for_each_compatible_node(np, "i2c","keywest-i2c") {
 		struct pmac_i2c_host_kw *host;
-		int multibus, chans, i;
+		int multibus;
 
 		/* Found one, init a host structure */
 		host = kw_i2c_host_init(np);
@@ -614,6 +614,8 @@
 		 * parent type
 		 */
 		if (multibus) {
+			int chans, i;
+
 			parent = of_get_parent(np);
 			if (parent == NULL)
 				continue;
@@ -1258,8 +1260,7 @@
 	if (inst == NULL)
 		return;
 	pmac_i2c_close(inst->bus);
-	if (inst)
-		kfree(inst);
+	kfree(inst);
 }
 
 static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len)
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 3362e78..f0bc08f 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -33,6 +33,8 @@
 extern void pmac_check_ht_link(void);
 
 extern void pmac_setup_smp(void);
+extern void pmac32_cpu_die(void);
+extern void low_cpu_die(void) __attribute__((noreturn));
 
 extern int pmac_nvram_init(void);
 extern void pmac_pic_init(void);
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 15c2241..f1d0132 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -480,7 +480,7 @@
 #endif
 
 	/* SMP Init has to be done early as we need to patch up
-	 * cpu_possible_map before interrupt stacks are allocated
+	 * cpu_possible_mask before interrupt stacks are allocated
 	 * or kaboom...
 	 */
 #ifdef CONFIG_SMP
@@ -646,7 +646,7 @@
 /* access per cpu vars from generic smp.c */
 DECLARE_PER_CPU(int, cpu_state);
 
-static void pmac_cpu_die(void)
+static void pmac64_cpu_die(void)
 {
 	/*
 	 * turn off as much as possible, we'll be
@@ -717,8 +717,13 @@
 	.pcibios_after_init	= pmac_pcibios_after_init,
 	.phys_mem_access_prot	= pci_phys_mem_access_prot,
 #endif
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC64)
-	.cpu_die		= pmac_cpu_die,
+#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_PPC64
+	.cpu_die		= pmac64_cpu_die,
+#endif
+#ifdef CONFIG_PPC32
+	.cpu_die		= pmac32_cpu_die,
+#endif
 #endif
 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
 	.cpu_die		= generic_mach_cpu_die,
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 6898e82..c95215f 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -53,6 +53,8 @@
 #include <asm/pmac_low_i2c.h>
 #include <asm/pmac_pfunc.h>
 
+#include "pmac.h"
+
 #undef DEBUG
 
 #ifdef DEBUG
@@ -315,7 +317,7 @@
 	/* This is necessary because OF doesn't know about the
 	 * secondary cpu(s), and thus there aren't nodes in the
 	 * device tree for them, and smp_setup_cpu_maps hasn't
-	 * set their bits in cpu_present_map.
+	 * set their bits in cpu_present_mask.
 	 */
 	if (ncpus > NR_CPUS)
 		ncpus = NR_CPUS;
@@ -878,10 +880,9 @@
 	return 0;
 }
 
-extern void low_cpu_die(void) __attribute__((noreturn)); /* in sleep.S */
 static int cpu_dead[NR_CPUS];
 
-void cpu_die(void)
+void pmac32_cpu_die(void)
 {
 	local_irq_disable();
 	cpu_dead[smp_processor_id()] = 1;
@@ -944,7 +945,7 @@
 	}
 #ifdef CONFIG_PPC32
 	else {
-		/* We have to set bits in cpu_possible_map here since the
+		/* We have to set bits in cpu_possible_mask here since the
 		 * secondary CPU(s) aren't in the device tree. Various
 		 * things won't be initialized for CPUs not in the possible
 		 * map, so we really need to fix it up here.
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 0ff5174..3dbef30 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -7,7 +7,7 @@
 endif
 
 obj-y			:= lpar.o hvCall.o nvram.o reconfig.o \
-			   setup.o iommu.o ras.o \
+			   setup.o iommu.o event_sources.o ras.o \
 			   firmware.o power.o dlpar.o
 obj-$(CONFIG_SMP)	+= smp.o
 obj-$(CONFIG_XICS)	+= xics.o
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index e1682bc..d71e585 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -79,13 +79,12 @@
 	 * prepend this to the full_name.
 	 */
 	name = (char *)ccwa + ccwa->name_offset;
-	dn->full_name = kmalloc(strlen(name) + 2, GFP_KERNEL);
+	dn->full_name = kasprintf(GFP_KERNEL, "/%s", name);
 	if (!dn->full_name) {
 		kfree(dn);
 		return NULL;
 	}
 
-	sprintf(dn->full_name, "/%s", name);
 	return dn;
 }
 
@@ -410,15 +409,13 @@
 	 * directory of the device tree.  CPUs actually live in the
 	 * cpus directory so we need to fixup the full_name.
 	 */
-	cpu_name = kzalloc(strlen(dn->full_name) + strlen("/cpus") + 1,
-			   GFP_KERNEL);
+	cpu_name = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name);
 	if (!cpu_name) {
 		dlpar_free_cc_nodes(dn);
 		rc = -ENOMEM;
 		goto out;
 	}
 
-	sprintf(cpu_name, "/cpus%s", dn->full_name);
 	kfree(dn->full_name);
 	dn->full_name = cpu_name;
 
@@ -433,6 +430,7 @@
 	if (rc) {
 		dlpar_release_drc(drc_index);
 		dlpar_free_cc_nodes(dn);
+		goto out;
 	}
 
 	rc = dlpar_online_cpu(dn);
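
Both allocations in this file now use kasprintf(), which sizes, allocates and formats the string in one call and returns NULL on allocation failure; the kmalloc()+sprintf() pair and its hand-rolled length arithmetic go away. A minimal sketch of the idiom:

/* Minimal sketch of the kasprintf() idiom; the caller still kfree()s
 * the returned buffer, exactly as the code above does for full_name. */
#include <linux/kernel.h>
#include <linux/slab.h>

static char *make_rooted_name(const char *name)
{
	char *full = kasprintf(GFP_KERNEL, "/%s", name);

	if (!full)
		return NULL;		/* allocation failed */
	return full;			/* caller kfree()s it */
}
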
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 7df7fbb..34b7dc1 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -749,7 +749,7 @@
 	/* Determine type of EEH reset required by device,
 	 * default hot reset or fundamental reset
 	 */
-	if (dev->needs_freset)
+	if (dev && dev->needs_freset)
 		rtas_pci_slot_reset(pdn, 3);
 	else
 		rtas_pci_slot_reset(pdn, 1);
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
new file mode 100644
index 0000000..e889c9d
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/event_sources.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2001 Dave Engebretsen IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <asm/prom.h>
+
+#include "pseries.h"
+
+void request_event_sources_irqs(struct device_node *np,
+				irq_handler_t handler,
+				const char *name)
+{
+	int i, index, count = 0;
+	struct of_irq oirq;
+	const u32 *opicprop;
+	unsigned int opicplen;
+	unsigned int virqs[16];
+
+	/* Check for obsolete "open-pic-interrupt" property. If present, then
+	 * map those interrupts using the default interrupt host and default
+	 * trigger
+	 */
+	opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
+	if (opicprop) {
+		opicplen /= sizeof(u32);
+		for (i = 0; i < opicplen; i++) {
+			if (count > 15)
+				break;
+			virqs[count] = irq_create_mapping(NULL, *(opicprop++));
+			if (virqs[count] == NO_IRQ)
+				printk(KERN_ERR "Unable to allocate interrupt "
+				       "number for %s\n", np->full_name);
+			else
+				count++;
+
+		}
+	}
+	/* Else use normal interrupt tree parsing */
+	else {
+		/* First try to do a proper OF tree parsing */
+		for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
+		     index++) {
+			if (count > 15)
+				break;
+			virqs[count] = irq_create_of_mapping(oirq.controller,
+							    oirq.specifier,
+							    oirq.size);
+			if (virqs[count] == NO_IRQ)
+				printk(KERN_ERR "Unable to allocate interrupt "
+				       "number for %s\n", np->full_name);
+			else
+				count++;
+		}
+	}
+
+	/* Now request them */
+	for (i = 0; i < count; i++) {
+		if (request_irq(virqs[i], handler, 0, name, NULL)) {
+			printk(KERN_ERR "Unable to request interrupt %d for "
+			       "%s\n", virqs[i], np->full_name);
+			return;
+		}
+	}
+}
+
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index a8e1d5d..8f85f39 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -154,30 +154,6 @@
 	for(;;);
 }
 
-static int qcss_tok;	/* query-cpu-stopped-state token */
-
-/* Get state of physical CPU.
- * Return codes:
- *	0	- The processor is in the RTAS stopped state
- *	1	- stop-self is in progress
- *	2	- The processor is not in the RTAS stopped state
- *	-1	- Hardware Error
- *	-2	- Hardware Busy, Try again later.
- */
-static int query_cpu_stopped(unsigned int pcpu)
-{
-	int cpu_status, status;
-
-	status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
-	if (status != 0) {
-		printk(KERN_ERR
-		       "RTAS query-cpu-stopped-state failed: %i\n", status);
-		return status;
-	}
-
-	return cpu_status;
-}
-
 static int pseries_cpu_disable(void)
 {
 	int cpu = smp_processor_id();
@@ -187,7 +163,7 @@
 
 	/*fix boot_cpuid here*/
 	if (cpu == boot_cpuid)
-		boot_cpuid = any_online_cpu(cpu_online_map);
+		boot_cpuid = cpumask_any(cpu_online_mask);
 
 	/* FIXME: abstract this to not be platform specific later on */
 	xics_migrate_irqs_away();
@@ -224,8 +200,9 @@
 	} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
 
 		for (tries = 0; tries < 25; tries++) {
-			cpu_status = query_cpu_stopped(pcpu);
-			if (cpu_status == 0 || cpu_status == -1)
+			cpu_status = smp_query_cpu_stopped(pcpu);
+			if (cpu_status == QCSS_STOPPED ||
+			    cpu_status == QCSS_HARDWARE_ERROR)
 				break;
 			cpu_relax();
 		}
@@ -245,7 +222,7 @@
 }
 
 /*
- * Update cpu_present_map and paca(s) for a new cpu node.  The wrinkle
+ * Update cpu_present_mask and paca(s) for a new cpu node.  The wrinkle
  * here is that a cpu device node may represent up to two logical cpus
  * in the SMT case.  We must honor the assumption in other code that
  * the logical ids for sibling SMT threads x and y are adjacent, such
@@ -254,7 +231,7 @@
 static int pseries_add_processor(struct device_node *np)
 {
 	unsigned int cpu;
-	cpumask_t candidate_map, tmp = CPU_MASK_NONE;
+	cpumask_var_t candidate_mask, tmp;
 	int err = -ENOSPC, len, nthreads, i;
 	const u32 *intserv;
 
@@ -262,48 +239,53 @@
 	if (!intserv)
 		return 0;
 
+	zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
+	zalloc_cpumask_var(&tmp, GFP_KERNEL);
+
 	nthreads = len / sizeof(u32);
 	for (i = 0; i < nthreads; i++)
-		cpu_set(i, tmp);
+		cpumask_set_cpu(i, tmp);
 
 	cpu_maps_update_begin();
 
-	BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
+	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
 
 	/* Get a bitmap of unoccupied slots. */
-	cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
-	if (cpus_empty(candidate_map)) {
+	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
+	if (cpumask_empty(candidate_mask)) {
 		/* If we get here, it most likely means that NR_CPUS is
 		 * less than the partition's max processors setting.
 		 */
 		printk(KERN_ERR "Cannot add cpu %s; this system configuration"
 		       " supports %d logical cpus.\n", np->full_name,
-		       cpus_weight(cpu_possible_map));
+		       cpumask_weight(cpu_possible_mask));
 		goto out_unlock;
 	}
 
-	while (!cpus_empty(tmp))
-		if (cpus_subset(tmp, candidate_map))
+	while (!cpumask_empty(tmp))
+		if (cpumask_subset(tmp, candidate_mask))
 			/* Found a range where we can insert the new cpu(s) */
 			break;
 		else
-			cpus_shift_left(tmp, tmp, nthreads);
+			cpumask_shift_left(tmp, tmp, nthreads);
 
-	if (cpus_empty(tmp)) {
-		printk(KERN_ERR "Unable to find space in cpu_present_map for"
+	if (cpumask_empty(tmp)) {
+		printk(KERN_ERR "Unable to find space in cpu_present_mask for"
 		       " processor %s with %d thread(s)\n", np->name,
 		       nthreads);
 		goto out_unlock;
 	}
 
-	for_each_cpu_mask(cpu, tmp) {
-		BUG_ON(cpu_isset(cpu, cpu_present_map));
+	for_each_cpu(cpu, tmp) {
+		BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask));
 		set_cpu_present(cpu, true);
 		set_hard_smp_processor_id(cpu, *intserv++);
 	}
 	err = 0;
 out_unlock:
 	cpu_maps_update_done();
+	free_cpumask_var(candidate_mask);
+	free_cpumask_var(tmp);
 	return err;
 }
 
@@ -334,7 +316,7 @@
 			set_hard_smp_processor_id(cpu, -1);
 			break;
 		}
-		if (cpu == NR_CPUS)
+		if (cpu >= nr_cpu_ids)
 			printk(KERN_WARNING "Could not find cpu to remove "
 			       "with physical id 0x%x\n", intserv[i]);
 	}
@@ -388,6 +370,7 @@
 	struct device_node *np;
 	const char *typep;
 	int cpu;
+	int qcss_tok;
 
 	for_each_node_by_name(np, "interrupt-controller") {
 		typep = of_get_property(np, "compatible", NULL);
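
pseries_add_processor() above is converted from on-stack cpumask_t values to cpumask_var_t, the pattern this merge applies throughout (numa.c and pseries/smp.c as well): allocate with zalloc_cpumask_var(), manipulate through the cpumask_* accessors, and free on every exit path so the masks are not stack-resident when CPUMASK_OFFSTACK is enabled. A stripped-down sketch of that lifecycle:

/* Stripped-down sketch of the cpumask_var_t lifecycle used above.  With
 * CONFIG_CPUMASK_OFFSTACK the mask is heap-allocated; otherwise these
 * calls degenerate to a plain on-stack mask. */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int count_unused_cpu_slots(void)
{
	cpumask_var_t free_slots;
	int n;

	if (!zalloc_cpumask_var(&free_slots, GFP_KERNEL))
		return -ENOMEM;

	cpumask_xor(free_slots, cpu_possible_mask, cpu_present_mask);
	n = cpumask_weight(free_slots);

	free_cpumask_var(free_slots);
	return n;
}
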
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 383a5d0..48d2057 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -228,3 +228,41 @@
 	mtcrf	0xff,r0
 
 	blr				/* return r3 = status */
+
+/* See plpar_hcall_raw to see why this is needed */
+_GLOBAL(plpar_hcall9_raw)
+	HMT_MEDIUM
+
+	mfcr	r0
+	stw	r0,8(r1)
+
+	std     r4,STK_PARM(r4)(r1)     /* Save ret buffer */
+
+	mr	r4,r5
+	mr	r5,r6
+	mr	r6,r7
+	mr	r7,r8
+	mr	r8,r9
+	mr	r9,r10
+	ld	r10,STK_PARM(r11)(r1)	 /* put arg7 in R10 */
+	ld	r11,STK_PARM(r12)(r1)	 /* put arg8 in R11 */
+	ld	r12,STK_PARM(r13)(r1)    /* put arg9 in R12 */
+
+	HVSC				/* invoke the hypervisor */
+
+	mr	r0,r12
+	ld	r12,STK_PARM(r4)(r1)
+	std	r4,  0(r12)
+	std	r5,  8(r12)
+	std	r6, 16(r12)
+	std	r7, 24(r12)
+	std	r8, 32(r12)
+	std	r9, 40(r12)
+	std	r10,48(r12)
+	std	r11,56(r12)
+	std	r0, 64(r12)
+
+	lwz	r0,8(r1)
+	mtcrf	0xff,r0
+
+	blr				/* return r3 = status */
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 0707653..cf79b46 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -367,21 +367,28 @@
 {
 	unsigned long size_bytes = 1UL << ppc64_pft_size;
 	unsigned long hpte_count = size_bytes >> 4;
-	unsigned long dummy1, dummy2, dword0;
+	struct {
+		unsigned long pteh;
+		unsigned long ptel;
+	} ptes[4];
 	long lpar_rc;
-	int i;
+	int i, j;
 
-	/* TODO: Use bulk call */
-	for (i = 0; i < hpte_count; i++) {
-		/* dont remove HPTEs with VRMA mappings */
-		lpar_rc = plpar_pte_remove_raw(H_ANDCOND, i, HPTE_V_1TB_SEG,
-						&dummy1, &dummy2);
-		if (lpar_rc == H_NOT_FOUND) {
-			lpar_rc = plpar_pte_read_raw(0, i, &dword0, &dummy1);
-			if (!lpar_rc && ((dword0 & HPTE_V_VRMA_MASK)
-				!= HPTE_V_VRMA_MASK))
-				/* Can be hpte for 1TB Seg. So remove it */
-				plpar_pte_remove_raw(0, i, 0, &dummy1, &dummy2);
+	/* Read in batches of 4,
+	 * invalidate only valid entries not in the VRMA;
+	 * hpte_count will be a multiple of 4.
+	 */
+	for (i = 0; i < hpte_count; i += 4) {
+		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
+		if (lpar_rc != H_SUCCESS)
+			continue;
+		for (j = 0; j < 4; j++) {
+			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
+				HPTE_V_VRMA_MASK)
+				continue;
+			if (ptes[j].pteh & HPTE_V_VALID)
+				plpar_pte_remove_raw(0, i + j, 0,
+					&(ptes[j].pteh), &(ptes[j].ptel));
 		}
 	}
 }
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index a05f8d4..d980111 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -4,6 +4,14 @@
 #include <asm/hvcall.h>
 #include <asm/page.h>
 
+/* Get state of physical CPU from query_cpu_stopped */
+int smp_query_cpu_stopped(unsigned int pcpu);
+#define QCSS_STOPPED 0
+#define QCSS_STOPPING 1
+#define QCSS_NOT_STOPPED 2
+#define QCSS_HARDWARE_ERROR -1
+#define QCSS_HARDWARE_BUSY -2
+
 static inline long poll_pending(void)
 {
 	return plpar_hcall_norets(H_POLL_PENDING);
@@ -183,6 +191,24 @@
 	return rc;
 }
 
+/*
+ * plpar_pte_read_4_raw can be called in real mode.
+ * ptes must be 8*sizeof(unsigned long)
+ */
+static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
+					unsigned long *ptes)
+
+{
+	long rc;
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+	rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
+
+	memcpy(ptes, retbuf, 8*sizeof(unsigned long));
+
+	return rc;
+}
+
 static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
 		unsigned long avpn)
 {
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 9e17c0d..40c93ca 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -10,6 +10,13 @@
 #ifndef _PSERIES_PSERIES_H
 #define _PSERIES_PSERIES_H
 
+#include <linux/interrupt.h>
+
+struct device_node;
+
+extern void request_event_sources_irqs(struct device_node *np,
+				       irq_handler_t handler, const char *name);
+
 extern void __init fw_feature_init(const char *hypertas, unsigned long len);
 
 struct pt_regs;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index db940d2..41a3e9a 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -67,63 +67,6 @@
 static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
 
 
-static void request_ras_irqs(struct device_node *np,
-			irq_handler_t handler,
-			const char *name)
-{
-	int i, index, count = 0;
-	struct of_irq oirq;
-	const u32 *opicprop;
-	unsigned int opicplen;
-	unsigned int virqs[16];
-
-	/* Check for obsolete "open-pic-interrupt" property. If present, then
-	 * map those interrupts using the default interrupt host and default
-	 * trigger
-	 */
-	opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
-	if (opicprop) {
-		opicplen /= sizeof(u32);
-		for (i = 0; i < opicplen; i++) {
-			if (count > 15)
-				break;
-			virqs[count] = irq_create_mapping(NULL, *(opicprop++));
-			if (virqs[count] == NO_IRQ)
-				printk(KERN_ERR "Unable to allocate interrupt "
-				       "number for %s\n", np->full_name);
-			else
-				count++;
-
-		}
-	}
-	/* Else use normal interrupt tree parsing */
-	else {
-		/* First try to do a proper OF tree parsing */
-		for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
-		     index++) {
-			if (count > 15)
-				break;
-			virqs[count] = irq_create_of_mapping(oirq.controller,
-							    oirq.specifier,
-							    oirq.size);
-			if (virqs[count] == NO_IRQ)
-				printk(KERN_ERR "Unable to allocate interrupt "
-				       "number for %s\n", np->full_name);
-			else
-				count++;
-		}
-	}
-
-	/* Now request them */
-	for (i = 0; i < count; i++) {
-		if (request_irq(virqs[i], handler, 0, name, NULL)) {
-			printk(KERN_ERR "Unable to request interrupt %d for "
-			       "%s\n", virqs[i], np->full_name);
-			return;
-		}
-	}
-}
-
 /*
  * Initialize handlers for the set of interrupts caused by hardware errors
  * and power system events.
@@ -138,14 +81,15 @@
 	/* Internal Errors */
 	np = of_find_node_by_path("/event-sources/internal-errors");
 	if (np != NULL) {
-		request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR");
+		request_event_sources_irqs(np, ras_error_interrupt,
+					   "RAS_ERROR");
 		of_node_put(np);
 	}
 
 	/* EPOW Events */
 	np = of_find_node_by_path("/event-sources/epow-events");
 	if (np != NULL) {
-		request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW");
+		request_event_sources_irqs(np, ras_epow_interrupt, "RAS_EPOW");
 		of_node_put(np);
 	}
 
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 6710761..a6d19e3 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -496,13 +496,14 @@
 }
 
 
-DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
+DECLARE_PER_CPU(long, smt_snooze_delay);
 
 static void pseries_dedicated_idle_sleep(void)
 { 
 	unsigned int cpu = smp_processor_id();
 	unsigned long start_snooze;
 	unsigned long in_purr, out_purr;
+	long snooze = __get_cpu_var(smt_snooze_delay);
 
 	/*
 	 * Indicate to the HV that we are idle. Now would be
@@ -517,13 +518,12 @@
 	 * has been checked recently.  If we should poll for a little
 	 * while, do so.
 	 */
-	if (__get_cpu_var(smt_snooze_delay)) {
-		start_snooze = get_tb() +
-			__get_cpu_var(smt_snooze_delay) * tb_ticks_per_usec;
+	if (snooze) {
+		start_snooze = get_tb() + snooze * tb_ticks_per_usec;
 		local_irq_enable();
 		set_thread_flag(TIF_POLLING_NRFLAG);
 
-		while (get_tb() < start_snooze) {
+		while ((snooze < 0) || (get_tb() < start_snooze)) {
 			if (need_resched() || cpu_is_offline(cpu))
 				goto out;
 			ppc64_runlatch_off();
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 4e7f89a..3b1bf61 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -55,7 +55,29 @@
  * The Primary thread of each non-boot processor was started from the OF client
  * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
  */
-static cpumask_t of_spin_map;
+static cpumask_var_t of_spin_mask;
+
+/* Query where a cpu is now.  Return codes #defined in plpar_wrappers.h */
+int smp_query_cpu_stopped(unsigned int pcpu)
+{
+	int cpu_status, status;
+	int qcss_tok = rtas_token("query-cpu-stopped-state");
+
+	if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
+		printk(KERN_INFO "Firmware doesn't support "
+				"query-cpu-stopped-state\n");
+		return QCSS_HARDWARE_ERROR;
+	}
+
+	status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
+	if (status != 0) {
+		printk(KERN_ERR
+		       "RTAS query-cpu-stopped-state failed: %i\n", status);
+		return status;
+	}
+
+	return cpu_status;
+}
 
 /**
  * smp_startup_cpu() - start the given cpu
@@ -76,12 +98,18 @@
 	unsigned int pcpu;
 	int start_cpu;
 
-	if (cpu_isset(lcpu, of_spin_map))
+	if (cpumask_test_cpu(lcpu, of_spin_mask))
 		/* Already started by OF and sitting in spin loop */
 		return 1;
 
 	pcpu = get_hard_smp_processor_id(lcpu);
 
+	/* Check to see if the CPU is already out of firmware (e.g. after a kexec) */
+	if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED) {
+		cpumask_set_cpu(lcpu, of_spin_mask);
+		return 1;
+	}
+
 	/* Fixup atomic count: it exited inside IRQ handler. */
 	task_thread_info(paca[lcpu].__current)->preempt_count	= 0;
 
@@ -115,7 +143,7 @@
 	if (firmware_has_feature(FW_FEATURE_SPLPAR))
 		vpa_init(cpu);
 
-	cpu_clear(cpu, of_spin_map);
+	cpumask_clear_cpu(cpu, of_spin_mask);
 	set_cpu_current_state(cpu, CPU_STATE_ONLINE);
 	set_default_offline_state(cpu);
 
@@ -186,17 +214,19 @@
 
 	pr_debug(" -> smp_init_pSeries()\n");
 
+	alloc_bootmem_cpumask_var(&of_spin_mask);
+
 	/* Mark threads which are still spinning in hold loops. */
 	if (cpu_has_feature(CPU_FTR_SMT)) {
 		for_each_present_cpu(i) { 
 			if (cpu_thread_in_core(i) == 0)
-				cpu_set(i, of_spin_map);
+				cpumask_set_cpu(i, of_spin_mask);
 		}
 	} else {
-		of_spin_map = cpu_present_map;
+		cpumask_copy(of_spin_mask, cpu_present_mask);
 	}
 
-	cpu_clear(boot_cpuid, of_spin_map);
+	cpumask_clear_cpu(boot_cpuid, of_spin_mask);
 
 	/* Non-lpar has additional take/give timebase */
 	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
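
A hypothetical caller of smp_query_cpu_stopped(), sketched only to show how the QCSS_* return codes declared in plpar_wrappers.h are meant to be read (non-negative values are CPU states, negative values are errors); the function name is illustrative:

static int example_wait_until_stopped(unsigned int pcpu)
{
	int status;

	for (;;) {
		status = smp_query_cpu_stopped(pcpu);
		if (status == QCSS_STOPPED)
			return 0;		/* CPU is back in firmware */
		if (status < 0)
			return status;		/* hardware error/busy or RTAS failure */
		cpu_relax();			/* QCSS_STOPPING or QCSS_NOT_STOPPED */
	}
}
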
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 1bcedd8..f19d194 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -163,29 +163,37 @@
 /* Interface to generic irq subsystem */
 
 #ifdef CONFIG_SMP
-static int get_irq_server(unsigned int virq, cpumask_t cpumask,
+/*
+ * For the moment we only implement delivery to all cpus or one cpu.
+ *
+ * If the requested affinity is cpu_all_mask, we set global affinity.
+ * If not we set it to the first cpu in the mask, even if multiple cpus
+ * are set. This is so things like irqbalance (which set core and package
+ * wide affinities) do the right thing.
+ */
+static int get_irq_server(unsigned int virq, const struct cpumask *cpumask,
 			  unsigned int strict_check)
 {
-	int server;
-	/* For the moment only implement delivery to all cpus or one cpu */
-	cpumask_t tmp = CPU_MASK_NONE;
 
 	if (!distribute_irqs)
 		return default_server;
 
-	if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
-		cpus_and(tmp, cpu_online_map, cpumask);
+	if (!cpumask_equal(cpumask, cpu_all_mask)) {
+		int server = cpumask_first_and(cpu_online_mask, cpumask);
 
-		server = first_cpu(tmp);
-
-		if (server < NR_CPUS)
+		if (server < nr_cpu_ids)
 			return get_hard_smp_processor_id(server);
 
 		if (strict_check)
 			return -1;
 	}
 
-	if (cpus_equal(cpu_online_map, cpu_present_map))
+	/*
+	 * Workaround issue with some versions of JS20 firmware that
+	 * deliver interrupts to cpus which haven't been started. This
+	 * happens when using the maxcpus= boot option.
+	 */
+	if (cpumask_equal(cpu_online_mask, cpu_present_mask))
 		return default_distrib_server;
 
 	return default_server;
@@ -207,7 +215,7 @@
 	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
 		return;
 
-	server = get_irq_server(virq, *(irq_to_desc(virq)->affinity), 0);
+	server = get_irq_server(virq, irq_to_desc(virq)->affinity, 0);
 
 	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
 				DEFAULT_PRIORITY);
@@ -398,11 +406,7 @@
 		return -1;
 	}
 
-	/*
-	 * For the moment only implement delivery to all cpus or one cpu.
-	 * Get current irq_server for the given irq
-	 */
-	irq_server = get_irq_server(virq, *cpumask, 1);
+	irq_server = get_irq_server(virq, cpumask, 1);
 	if (irq_server == -1) {
 		char cpulist[128];
 		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
@@ -611,7 +615,7 @@
 {
 	xics_request_ipi();
 
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 
 #endif /* CONFIG_SMP */
diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c
index 6478eb1..83f5196 100644
--- a/arch/powerpc/sysdev/mpc8xxx_gpio.c
+++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c
@@ -16,6 +16,7 @@
 #include <linux/of_gpio.h>
 #include <linux/gpio.h>
 #include <linux/slab.h>
+#include <linux/irq.h>
 
 #define MPC8XXX_GPIO_PINS	32
 
@@ -35,6 +36,7 @@
 	 * open drain mode safely
 	 */
 	u32 data;
+	struct irq_host *irq;
 };
 
 static inline u32 mpc8xxx_gpio2mask(unsigned int gpio)
@@ -128,12 +130,136 @@
 	return 0;
 }
 
+static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+	struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
+	struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
+
+	if (mpc8xxx_gc->irq && offset < MPC8XXX_GPIO_PINS)
+		return irq_create_mapping(mpc8xxx_gc->irq, offset);
+	else
+		return -ENXIO;
+}
+
+static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
+{
+	struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_desc_data(desc);
+	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+	unsigned int mask;
+
+	mask = in_be32(mm->regs + GPIO_IER) & in_be32(mm->regs + GPIO_IMR);
+	if (mask)
+		generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq,
+						     32 - ffs(mask)));
+}
+
+static void mpc8xxx_irq_unmask(unsigned int virq)
+{
+	struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+
+	setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(virq)));
+
+	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+}
+
+static void mpc8xxx_irq_mask(unsigned int virq)
+{
+	struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+
+	clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(virq)));
+
+	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+}
+
+static void mpc8xxx_irq_ack(unsigned int virq)
+{
+	struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+
+	out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(virq_to_hw(virq)));
+}
+
+static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type)
+{
+	struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+	unsigned long flags;
+
+	switch (flow_type) {
+	case IRQ_TYPE_EDGE_FALLING:
+		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		setbits32(mm->regs + GPIO_ICR,
+			  mpc8xxx_gpio2mask(virq_to_hw(virq)));
+		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		break;
+
+	case IRQ_TYPE_EDGE_BOTH:
+		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		clrbits32(mm->regs + GPIO_ICR,
+			  mpc8xxx_gpio2mask(virq_to_hw(virq)));
+		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct irq_chip mpc8xxx_irq_chip = {
+	.name		= "mpc8xxx-gpio",
+	.unmask		= mpc8xxx_irq_unmask,
+	.mask		= mpc8xxx_irq_mask,
+	.ack		= mpc8xxx_irq_ack,
+	.set_type	= mpc8xxx_irq_set_type,
+};
+
+static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq,
+				irq_hw_number_t hw)
+{
+	set_irq_chip_data(virq, h->host_data);
+	set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
+	set_irq_type(virq, IRQ_TYPE_NONE);
+
+	return 0;
+}
+
+static int mpc8xxx_gpio_irq_xlate(struct irq_host *h, struct device_node *ct,
+				  const u32 *intspec, unsigned int intsize,
+				  irq_hw_number_t *out_hwirq,
+				  unsigned int *out_flags)
+
+{
+	/* interrupt sense values coming from the device tree equal either
+	 * EDGE_FALLING or EDGE_BOTH
+	 */
+	*out_hwirq = intspec[0];
+	*out_flags = intspec[1];
+
+	return 0;
+}
+
+static struct irq_host_ops mpc8xxx_gpio_irq_ops = {
+	.map	= mpc8xxx_gpio_irq_map,
+	.xlate	= mpc8xxx_gpio_irq_xlate,
+};
+
 static void __init mpc8xxx_add_controller(struct device_node *np)
 {
 	struct mpc8xxx_gpio_chip *mpc8xxx_gc;
 	struct of_mm_gpio_chip *mm_gc;
 	struct of_gpio_chip *of_gc;
 	struct gpio_chip *gc;
+	unsigned hwirq;
 	int ret;
 
 	mpc8xxx_gc = kzalloc(sizeof(*mpc8xxx_gc), GFP_KERNEL);
@@ -158,11 +284,32 @@
 	else
 		gc->get = mpc8xxx_gpio_get;
 	gc->set = mpc8xxx_gpio_set;
+	gc->to_irq = mpc8xxx_gpio_to_irq;
 
 	ret = of_mm_gpiochip_add(np, mm_gc);
 	if (ret)
 		goto err;
 
+	hwirq = irq_of_parse_and_map(np, 0);
+	if (hwirq == NO_IRQ)
+		goto skip_irq;
+
+	mpc8xxx_gc->irq =
+		irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, MPC8XXX_GPIO_PINS,
+			       &mpc8xxx_gpio_irq_ops, MPC8XXX_GPIO_PINS);
+	if (!mpc8xxx_gc->irq)
+		goto skip_irq;
+
+	mpc8xxx_gc->irq->host_data = mpc8xxx_gc;
+
+	/* ack and mask all irqs */
+	out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
+	out_be32(mm_gc->regs + GPIO_IMR, 0);
+
+	set_irq_data(hwirq, mpc8xxx_gc);
+	set_irq_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade);
+
+skip_irq:
 	return;
 
 err:
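
A brief worked example of the hwirq recovery in mpc8xxx_gpio_irq_cascade() above, assuming mpc8xxx_gpio2mask(n) returns 1 << (31 - n) (its body is outside this hunk):

/*
 * An event on GPIO pin 0 sets bit 31 of the pending mask, so
 * ffs(mask) == 32 and 32 - ffs(mask) == 0; an event on pin 31 sets
 * bit 0, giving 32 - 1 == 31. If several pins are pending at once,
 * the lowest set bit (the highest-numbered pin) is handled on that
 * pass of the cascade handler.
 */
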
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 260295b..2102487 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -568,12 +568,12 @@
 #endif /* CONFIG_MPIC_U3_HT_IRQS */
 
 #ifdef CONFIG_SMP
-static int irq_choose_cpu(const cpumask_t *mask)
+static int irq_choose_cpu(const struct cpumask *mask)
 {
 	int cpuid;
 
 	if (cpumask_equal(mask, cpu_all_mask)) {
-		static int irq_rover;
+		static int irq_rover = 0;
 		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
 		unsigned long flags;
 
@@ -581,15 +581,11 @@
 	do_round_robin:
 		raw_spin_lock_irqsave(&irq_rover_lock, flags);
 
-		while (!cpu_online(irq_rover)) {
-			if (++irq_rover >= NR_CPUS)
-				irq_rover = 0;
-		}
+		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
+		if (irq_rover >= nr_cpu_ids)
+			irq_rover = cpumask_first(cpu_online_mask);
+
 		cpuid = irq_rover;
-		do {
-			if (++irq_rover >= NR_CPUS)
-				irq_rover = 0;
-		} while (!cpu_online(irq_rover));
 
 		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
 	} else {
@@ -601,7 +597,7 @@
 	return get_hard_smp_processor_id(cpuid);
 }
 #else
-static int irq_choose_cpu(const cpumask_t *mask)
+static int irq_choose_cpu(const struct cpumask *mask)
 {
 	return hard_smp_processor_id();
 }
@@ -814,12 +810,16 @@
 
 		mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
 	} else {
-		cpumask_t tmp;
+		cpumask_var_t tmp;
 
-		cpumask_and(&tmp, cpumask, cpu_online_mask);
+		alloc_cpumask_var(&tmp, GFP_KERNEL);
+
+		cpumask_and(tmp, cpumask, cpu_online_mask);
 
 		mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
-			       mpic_physmask(cpus_addr(tmp)[0]));
+			       mpic_physmask(cpumask_bits(tmp)[0]));
+
+		free_cpumask_var(tmp);
 	}
 
 	return 0;
@@ -1479,21 +1479,6 @@
 }
 
 
-void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
-{
-	struct mpic *mpic = mpic_primary;
-
-	BUG_ON(mpic == NULL);
-
-#ifdef DEBUG_IPI
-	DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
-#endif
-
-	mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
-		       ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE),
-		       mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
-}
-
 static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
 {
 	u32 src;
@@ -1589,8 +1574,25 @@
 	}
 }
 
+static void mpic_send_ipi(unsigned int ipi_no, const struct cpumask *cpu_mask)
+{
+	struct mpic *mpic = mpic_primary;
+
+	BUG_ON(mpic == NULL);
+
+#ifdef DEBUG_IPI
+	DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
+#endif
+
+	mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
+		       ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE),
+		       mpic_physmask(cpumask_bits(cpu_mask)[0]));
+}
+
 void smp_mpic_message_pass(int target, int msg)
 {
+	cpumask_var_t tmp;
+
 	/* make sure we're sending something that translates to an IPI */
 	if ((unsigned int)msg > 3) {
 		printk("SMP %d: smp_message_pass: unknown msg %d\n",
@@ -1599,13 +1601,17 @@
 	}
 	switch (target) {
 	case MSG_ALL:
-		mpic_send_ipi(msg, 0xffffffff);
+		mpic_send_ipi(msg, cpu_online_mask);
 		break;
 	case MSG_ALL_BUT_SELF:
-		mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
+		alloc_cpumask_var(&tmp, GFP_NOWAIT);
+		cpumask_andnot(tmp, cpu_online_mask,
+			       cpumask_of(smp_processor_id()));
+		mpic_send_ipi(msg, tmp);
+		free_cpumask_var(tmp);
 		break;
 	default:
-		mpic_send_ipi(msg, 1 << target);
+		mpic_send_ipi(msg, cpumask_of(target));
 		break;
 	}
 }
@@ -1616,7 +1622,7 @@
 
 	DBG("smp_mpic_probe()...\n");
 
-	nr_cpus = cpus_weight(cpu_possible_map);
+	nr_cpus = cpumask_weight(cpu_possible_mask);
 
 	DBG("nr_cpus: %d\n", nr_cpus);
 
diff --git a/arch/powerpc/sysdev/mv64x60_pci.c b/arch/powerpc/sysdev/mv64x60_pci.c
index 1456015..198f288 100644
--- a/arch/powerpc/sysdev/mv64x60_pci.c
+++ b/arch/powerpc/sysdev/mv64x60_pci.c
@@ -24,7 +24,7 @@
 #define MV64X60_VAL_LEN_MAX		11
 #define MV64X60_PCICFG_CPCI_HOTSWAP	0x68
 
-static ssize_t mv64x60_hs_reg_read(struct kobject *kobj,
+static ssize_t mv64x60_hs_reg_read(struct file *filp, struct kobject *kobj,
 				   struct bin_attribute *attr, char *buf,
 				   loff_t off, size_t count)
 {
@@ -45,7 +45,7 @@
 	return sprintf(buf, "0x%08x\n", v);
 }
 
-static ssize_t mv64x60_hs_reg_write(struct kobject *kobj,
+static ssize_t mv64x60_hs_reg_write(struct file *filp, struct kobject *kobj,
 				    struct bin_attribute *attr, char *buf,
 				    loff_t off, size_t count)
 {
diff --git a/arch/powerpc/sysdev/ppc4xx_soc.c b/arch/powerpc/sysdev/ppc4xx_soc.c
index 5c01435..d3d6ce3 100644
--- a/arch/powerpc/sysdev/ppc4xx_soc.c
+++ b/arch/powerpc/sysdev/ppc4xx_soc.c
@@ -191,11 +191,31 @@
 arch_initcall(ppc4xx_l2c_probe);
 
 /*
- * At present, this routine just applies a system reset.
+ * Apply a system reset. Alternatively, a board-specific value may be
+ * provided via the "reset-type" property in the cpu node.
  */
 void ppc4xx_reset_system(char *cmd)
 {
-	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_RST_SYSTEM);
+	struct device_node *np;
+	u32 reset_type = DBCR0_RST_SYSTEM;
+	const u32 *prop;
+
+	np = of_find_node_by_type(NULL, "cpu");
+	if (np) {
+		prop = of_get_property(np, "reset-type", NULL);
+
+		/*
+		 * Check if property exists and if it is in range:
+		 * 1 - PPC4xx core reset
+		 * 2 - PPC4xx chip reset
+		 * 3 - PPC4xx system reset (default)
+		 */
+		if ((prop) && ((prop[0] >= 1) && (prop[0] <= 3)))
+			reset_type = prop[0] << 28;
+	}
+
+	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | reset_type);
+
 	while (1)
 		;	/* Just in case the reset doesn't work */
 }
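
A worked example of the "reset-type" encoding used by ppc4xx_reset_system() above, assuming the usual 4xx DBCR0 layout in which DBCR0_RST_SYSTEM is 3 << 28:

/*
 *   reset-type = <1>  ->  1 << 28 == 0x10000000   (core reset)
 *   reset-type = <2>  ->  2 << 28 == 0x20000000   (chip reset)
 *   reset-type = <3>  ->  3 << 28 == 0x30000000   (system reset, default)
 */
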
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 72c8b0d..a689070 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -403,8 +403,9 @@
 static struct kobj_attribute sys_ipl_device_attr =
 	__ATTR(device, S_IRUGO, sys_ipl_device_show, NULL);
 
-static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr,
-				  char *buf, loff_t off, size_t count)
+static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj,
+				  struct bin_attribute *attr, char *buf,
+				  loff_t off, size_t count)
 {
 	return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START,
 					IPL_PARMBLOCK_SIZE);
@@ -419,8 +420,9 @@
 	.read = &ipl_parameter_read,
 };
 
-static ssize_t ipl_scp_data_read(struct kobject *kobj, struct bin_attribute *attr,
-				 char *buf, loff_t off, size_t count)
+static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
+				 struct bin_attribute *attr, char *buf,
+				 loff_t off, size_t count)
 {
 	unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len;
 	void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data;
@@ -694,7 +696,7 @@
 
 /* FCP reipl device attributes */
 
-static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj,
+static ssize_t reipl_fcp_scpdata_read(struct file *filp, struct kobject *kobj,
 				      struct bin_attribute *attr,
 				      char *buf, loff_t off, size_t count)
 {
@@ -704,7 +706,7 @@
 	return memory_read_from_buffer(buf, count, &off, scp_data, size);
 }
 
-static ssize_t reipl_fcp_scpdata_write(struct kobject *kobj,
+static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
 				       struct bin_attribute *attr,
 				       char *buf, loff_t off, size_t count)
 {
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 70c6965..efb6d39 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -237,6 +237,18 @@
 	return -1;
 }
 
+unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+	if (exception == 60)
+		return instruction_pointer(regs) - 2;
+	return instruction_pointer(regs);
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+	regs->pc = ip;
+}
+
 /*
  * The primary entry points for the kgdb debug trap table entries.
  */
@@ -247,7 +259,7 @@
 
 	local_irq_save(flags);
 	regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
-	kgdb_handle_exception(vec >> 2, SIGTRAP, 0, regs);
+	kgdb_handle_exception(0, SIGTRAP, 0, regs);
 	local_irq_restore(flags);
 }
 
diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
index 04df4ed..539243b 100644
--- a/arch/sparc/kernel/kgdb_32.c
+++ b/arch/sparc/kernel/kgdb_32.c
@@ -158,6 +158,12 @@
 {
 }
 
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+	regs->pc = ip;
+	regs->npc = regs->pc + 4;
+}
+
 struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: ta 0x7d */
 	.gdb_bpt_instr		= { 0x91, 0xd0, 0x20, 0x7d },
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index 0a2bd0f..768290a 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -181,6 +181,12 @@
 {
 }
 
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+	regs->tpc = ip;
+	regs->tnpc = regs->tpc + 4;
+}
+
 struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: ta 0x72 */
 	.gdb_bpt_instr		= { 0x91, 0xd0, 0x20, 0x72 },
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 20bb0e1a..ff16756 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -32,6 +32,9 @@
 #define IN	IN1
 #define KEY	%xmm2
 #define IV	%xmm3
+#define BSWAP_MASK %xmm10
+#define CTR	%xmm11
+#define INC	%xmm12
 
 #define KEYP	%rdi
 #define OUTP	%rsi
@@ -42,6 +45,7 @@
 #define T1	%r10
 #define TKEYP	T1
 #define T2	%r11
+#define TCTR_LOW T2
 
 _key_expansion_128:
 _key_expansion_256a:
@@ -724,3 +728,114 @@
 	movups IV, (IVP)
 .Lcbc_dec_just_ret:
 	ret
+
+.align 16
+.Lbswap_mask:
+	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+/*
+ * _aesni_inc_init:	internal ABI
+ *	setup registers used by _aesni_inc
+ * input:
+ *	IV
+ * output:
+ *	CTR:	== IV, in little endian
+ *	TCTR_LOW: == lower qword of CTR
+ *	INC:	== 1, in little endian
+ *	BSWAP_MASK == endian swapping mask
+ */
+_aesni_inc_init:
+	movaps .Lbswap_mask, BSWAP_MASK
+	movaps IV, CTR
+	PSHUFB_XMM BSWAP_MASK CTR
+	mov $1, TCTR_LOW
+	MOVQ_R64_XMM TCTR_LOW INC
+	MOVQ_R64_XMM CTR TCTR_LOW
+	ret
+
+/*
+ * _aesni_inc:		internal ABI
+ *	Increase IV by 1, IV is in big endian
+ * input:
+ *	IV
+ *	CTR:	== IV, in little endian
+ *	TCTR_LOW: == lower qword of CTR
+ *	INC:	== 1, in little endian
+ *	BSWAP_MASK == endian swapping mask
+ * output:
+ *	IV:	increased by 1
+ * changed:
+ *	CTR:	== output IV, in little endian
+ *	TCTR_LOW: == lower qword of CTR
+ */
+_aesni_inc:
+	paddq INC, CTR
+	add $1, TCTR_LOW
+	jnc .Linc_low
+	pslldq $8, INC
+	paddq INC, CTR
+	psrldq $8, INC
+.Linc_low:
+	movaps CTR, IV
+	PSHUFB_XMM BSWAP_MASK IV
+	ret
+
+/*
+ * void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ *		      size_t len, u8 *iv)
+ */
+ENTRY(aesni_ctr_enc)
+	cmp $16, LEN
+	jb .Lctr_enc_just_ret
+	mov 480(KEYP), KLEN
+	movups (IVP), IV
+	call _aesni_inc_init
+	cmp $64, LEN
+	jb .Lctr_enc_loop1
+.align 4
+.Lctr_enc_loop4:
+	movaps IV, STATE1
+	call _aesni_inc
+	movups (INP), IN1
+	movaps IV, STATE2
+	call _aesni_inc
+	movups 0x10(INP), IN2
+	movaps IV, STATE3
+	call _aesni_inc
+	movups 0x20(INP), IN3
+	movaps IV, STATE4
+	call _aesni_inc
+	movups 0x30(INP), IN4
+	call _aesni_enc4
+	pxor IN1, STATE1
+	movups STATE1, (OUTP)
+	pxor IN2, STATE2
+	movups STATE2, 0x10(OUTP)
+	pxor IN3, STATE3
+	movups STATE3, 0x20(OUTP)
+	pxor IN4, STATE4
+	movups STATE4, 0x30(OUTP)
+	sub $64, LEN
+	add $64, INP
+	add $64, OUTP
+	cmp $64, LEN
+	jge .Lctr_enc_loop4
+	cmp $16, LEN
+	jb .Lctr_enc_ret
+.align 4
+.Lctr_enc_loop1:
+	movaps IV, STATE
+	call _aesni_inc
+	movups (INP), IN
+	call _aesni_enc1
+	pxor IN, STATE
+	movups STATE, (OUTP)
+	sub $16, LEN
+	add $16, INP
+	add $16, OUTP
+	cmp $16, LEN
+	jge .Lctr_enc_loop1
+.Lctr_enc_ret:
+	movups IV, (IVP)
+.Lctr_enc_just_ret:
+	ret
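
For readers not following the SSE register juggling, here is a small C model of what _aesni_inc does to the IV between blocks. This is only an illustration; the assembly keeps the counter byte-swapped in CTR/INC and never round-trips it through memory like this.

#include <stdint.h>

/* Treat the 16-byte IV as a big-endian 128-bit counter and add one. */
static void ctr_iv_increment(uint8_t iv[16])
{
	int i;

	for (i = 15; i >= 0; i--)
		if (++iv[i] != 0)	/* stop once a byte does not wrap */
			break;
}
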
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 49c552c..2cb3dcc 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -18,6 +18,7 @@
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
 #include <crypto/cryptd.h>
+#include <crypto/ctr.h>
 #include <asm/i387.h>
 #include <asm/aes.h>
 
@@ -58,6 +59,8 @@
 			      const u8 *in, unsigned int len, u8 *iv);
 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
+asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
+			      const u8 *in, unsigned int len, u8 *iv);
 
 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 {
@@ -321,6 +324,72 @@
 	},
 };
 
+static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
+			    struct blkcipher_walk *walk)
+{
+	u8 *ctrblk = walk->iv;
+	u8 keystream[AES_BLOCK_SIZE];
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	unsigned int nbytes = walk->nbytes;
+
+	aesni_enc(ctx, keystream, ctrblk);
+	crypto_xor(keystream, src, nbytes);
+	memcpy(dst, keystream, nbytes);
+	crypto_inc(ctrblk, AES_BLOCK_SIZE);
+}
+
+static int ctr_crypt(struct blkcipher_desc *desc,
+		     struct scatterlist *dst, struct scatterlist *src,
+		     unsigned int nbytes)
+{
+	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	kernel_fpu_begin();
+	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+			      nbytes & AES_BLOCK_MASK, walk.iv);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+	if (walk.nbytes) {
+		ctr_crypt_final(ctx, &walk);
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
+	kernel_fpu_end();
+
+	return err;
+}
+
+static struct crypto_alg blk_ctr_alg = {
+	.cra_name		= "__ctr-aes-aesni",
+	.cra_driver_name	= "__driver-ctr-aes-aesni",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(blk_ctr_alg.cra_list),
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
+			.setkey		= aes_set_key,
+			.encrypt	= ctr_crypt,
+			.decrypt	= ctr_crypt,
+		},
+	},
+};
+
 static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
 			unsigned int key_len)
 {
@@ -467,13 +536,11 @@
 	},
 };
 
-#ifdef HAS_CTR
 static int ablk_ctr_init(struct crypto_tfm *tfm)
 {
 	struct cryptd_ablkcipher *cryptd_tfm;
 
-	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(ctr(__driver-aes-aesni))",
-					     0, 0);
+	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
 	if (IS_ERR(cryptd_tfm))
 		return PTR_ERR(cryptd_tfm);
 	ablk_init_common(tfm, cryptd_tfm);
@@ -500,11 +567,50 @@
 			.ivsize		= AES_BLOCK_SIZE,
 			.setkey		= ablk_set_key,
 			.encrypt	= ablk_encrypt,
-			.decrypt	= ablk_decrypt,
+			.decrypt	= ablk_encrypt, /* CTR: decrypt == encrypt */
 			.geniv		= "chainiv",
 		},
 	},
 };
+
+#ifdef HAS_CTR
+static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
+{
+	struct cryptd_ablkcipher *cryptd_tfm;
+
+	cryptd_tfm = cryptd_alloc_ablkcipher(
+		"rfc3686(__driver-ctr-aes-aesni)", 0, 0);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
+	ablk_init_common(tfm, cryptd_tfm);
+	return 0;
+}
+
+static struct crypto_alg ablk_rfc3686_ctr_alg = {
+	.cra_name		= "rfc3686(ctr(aes))",
+	.cra_driver_name	= "rfc3686-ctr-aes-aesni",
+	.cra_priority		= 400,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct async_aes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
+	.cra_init		= ablk_rfc3686_ctr_init,
+	.cra_exit		= ablk_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
+			.ivsize	     = CTR_RFC3686_IV_SIZE,
+			.setkey	     = ablk_set_key,
+			.encrypt     = ablk_encrypt,
+			.decrypt     = ablk_decrypt,
+			.geniv	     = "seqiv",
+		},
+	},
+};
 #endif
 
 #ifdef HAS_LRW
@@ -640,13 +746,17 @@
 		goto blk_ecb_err;
 	if ((err = crypto_register_alg(&blk_cbc_alg)))
 		goto blk_cbc_err;
+	if ((err = crypto_register_alg(&blk_ctr_alg)))
+		goto blk_ctr_err;
 	if ((err = crypto_register_alg(&ablk_ecb_alg)))
 		goto ablk_ecb_err;
 	if ((err = crypto_register_alg(&ablk_cbc_alg)))
 		goto ablk_cbc_err;
-#ifdef HAS_CTR
 	if ((err = crypto_register_alg(&ablk_ctr_alg)))
 		goto ablk_ctr_err;
+#ifdef HAS_CTR
+	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
+		goto ablk_rfc3686_ctr_err;
 #endif
 #ifdef HAS_LRW
 	if ((err = crypto_register_alg(&ablk_lrw_alg)))
@@ -675,13 +785,17 @@
 ablk_lrw_err:
 #endif
 #ifdef HAS_CTR
+	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
+ablk_rfc3686_ctr_err:
+#endif
 	crypto_unregister_alg(&ablk_ctr_alg);
 ablk_ctr_err:
-#endif
 	crypto_unregister_alg(&ablk_cbc_alg);
 ablk_cbc_err:
 	crypto_unregister_alg(&ablk_ecb_alg);
 ablk_ecb_err:
+	crypto_unregister_alg(&blk_ctr_alg);
+blk_ctr_err:
 	crypto_unregister_alg(&blk_cbc_alg);
 blk_cbc_err:
 	crypto_unregister_alg(&blk_ecb_alg);
@@ -705,10 +819,12 @@
 	crypto_unregister_alg(&ablk_lrw_alg);
 #endif
 #ifdef HAS_CTR
-	crypto_unregister_alg(&ablk_ctr_alg);
+	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
 #endif
+	crypto_unregister_alg(&ablk_ctr_alg);
 	crypto_unregister_alg(&ablk_cbc_alg);
 	crypto_unregister_alg(&ablk_ecb_alg);
+	crypto_unregister_alg(&blk_ctr_alg);
 	crypto_unregister_alg(&blk_cbc_alg);
 	crypto_unregister_alg(&blk_ecb_alg);
 	crypto_unregister_alg(&__aesni_alg);
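
One non-obvious hunk above is the async CTR alg's .decrypt being pointed at ablk_encrypt. That is consistent with CTR being a pure keystream mode: both directions XOR the same AES-generated keystream onto the data, so running the encrypt path over ciphertext recovers the plaintext.

/*
 *   ciphertext = plaintext  ^ E_K(counter)
 *   plaintext  = ciphertext ^ E_K(counter)
 * so a separate decrypt routine would be byte-for-byte identical to
 * the encrypt one.
 */
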
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index c70068d..63e35ec 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -145,9 +145,11 @@
 int set_memory_4k(unsigned long addr, int numpages);
 
 int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wc(unsigned long *addr, int addrinarray);
 int set_memory_array_wb(unsigned long *addr, int addrinarray);
 
 int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wc(struct page **pages, int addrinarray);
 int set_pages_array_wb(struct page **pages, int addrinarray);
 
 /*
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
index 14cf526..280bf7f 100644
--- a/arch/x86/include/asm/inst.h
+++ b/arch/x86/include/asm/inst.h
@@ -7,7 +7,66 @@
 
 #ifdef __ASSEMBLY__
 
+#define REG_NUM_INVALID		100
+
+#define REG_TYPE_R64		0
+#define REG_TYPE_XMM		1
+#define REG_TYPE_INVALID	100
+
+	.macro R64_NUM opd r64
+	\opd = REG_NUM_INVALID
+	.ifc \r64,%rax
+	\opd = 0
+	.endif
+	.ifc \r64,%rcx
+	\opd = 1
+	.endif
+	.ifc \r64,%rdx
+	\opd = 2
+	.endif
+	.ifc \r64,%rbx
+	\opd = 3
+	.endif
+	.ifc \r64,%rsp
+	\opd = 4
+	.endif
+	.ifc \r64,%rbp
+	\opd = 5
+	.endif
+	.ifc \r64,%rsi
+	\opd = 6
+	.endif
+	.ifc \r64,%rdi
+	\opd = 7
+	.endif
+	.ifc \r64,%r8
+	\opd = 8
+	.endif
+	.ifc \r64,%r9
+	\opd = 9
+	.endif
+	.ifc \r64,%r10
+	\opd = 10
+	.endif
+	.ifc \r64,%r11
+	\opd = 11
+	.endif
+	.ifc \r64,%r12
+	\opd = 12
+	.endif
+	.ifc \r64,%r13
+	\opd = 13
+	.endif
+	.ifc \r64,%r14
+	\opd = 14
+	.endif
+	.ifc \r64,%r15
+	\opd = 15
+	.endif
+	.endm
+
 	.macro XMM_NUM opd xmm
+	\opd = REG_NUM_INVALID
 	.ifc \xmm,%xmm0
 	\opd = 0
 	.endif
@@ -58,13 +117,25 @@
 	.endif
 	.endm
 
+	.macro REG_TYPE type reg
+	R64_NUM reg_type_r64 \reg
+	XMM_NUM reg_type_xmm \reg
+	.if reg_type_r64 <> REG_NUM_INVALID
+	\type = REG_TYPE_R64
+	.elseif reg_type_xmm <> REG_NUM_INVALID
+	\type = REG_TYPE_XMM
+	.else
+	\type = REG_TYPE_INVALID
+	.endif
+	.endm
+
 	.macro PFX_OPD_SIZE
 	.byte 0x66
 	.endm
 
-	.macro PFX_REX opd1 opd2
-	.if (\opd1 | \opd2) & 8
-	.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1)
+	.macro PFX_REX opd1 opd2 W=0
+	.if ((\opd1 | \opd2) & 8) || \W
+	.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
 	.endif
 	.endm
 
@@ -145,6 +216,25 @@
 	.byte 0x0f, 0x38, 0xdf
 	MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2
 	.endm
+
+	.macro MOVQ_R64_XMM opd1 opd2
+	REG_TYPE movq_r64_xmm_opd1_type \opd1
+	.if movq_r64_xmm_opd1_type == REG_TYPE_XMM
+	XMM_NUM movq_r64_xmm_opd1 \opd1
+	R64_NUM movq_r64_xmm_opd2 \opd2
+	.else
+	R64_NUM movq_r64_xmm_opd1 \opd1
+	XMM_NUM movq_r64_xmm_opd2 \opd2
+	.endif
+	PFX_OPD_SIZE
+	PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
+	.if movq_r64_xmm_opd1_type == REG_TYPE_XMM
+	.byte 0x0f, 0x7e
+	.else
+	.byte 0x0f, 0x6e
+	.endif
+	MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
+	.endm
 #endif
 
 #endif
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
index e6c6c80..006da36 100644
--- a/arch/x86/include/asm/kgdb.h
+++ b/arch/x86/include/asm/kgdb.h
@@ -76,4 +76,7 @@
 #define BREAK_INSTR_SIZE	1
 #define CACHE_FLUSH_IS_SAFE	1
 
+extern int kgdb_ll_trap(int cmd, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig);
+
 #endif /* _ASM_X86_KGDB_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 5a51379..7e5c6a6 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -789,6 +789,8 @@
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
+extern void early_trap_init(void);
+
 /* Defined in head.S */
 extern struct desc_ptr		early_gdt_descr;
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c1c00d0..cc83a00 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1084,6 +1084,20 @@
 	}
 }
 
+#ifdef CONFIG_KGDB
+/*
+ * Restore debug regs if using kgdbwait and you have a kernel debugger
+ * connection established.
+ */
+static void dbg_restore_debug_regs(void)
+{
+	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
+		arch_kgdb_ops.correct_hw_break();
+}
+#else /* ! CONFIG_KGDB */
+#define dbg_restore_debug_regs()
+#endif /* ! CONFIG_KGDB */
+
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
@@ -1174,18 +1188,8 @@
 	load_TR_desc();
 	load_LDT(&init_mm.context);
 
-#ifdef CONFIG_KGDB
-	/*
-	 * If the kgdb is connected no debug regs should be altered.  This
-	 * is only applicable when KGDB and a KGDB I/O module are built
-	 * into the kernel and you are using early debugging with
-	 * kgdbwait. KGDB will control the kernel HW breakpoint registers.
-	 */
-	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
-		arch_kgdb_ops.correct_hw_break();
-	else
-#endif
-		clear_all_debug_regs();
+	clear_all_debug_regs();
+	dbg_restore_debug_regs();
 
 	fpu_init();
 
@@ -1239,6 +1243,7 @@
 #endif
 
 	clear_all_debug_regs();
+	dbg_restore_debug_regs();
 
 	/*
 	 * Force FPU initialization:
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index b9c830c..fa99bae 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -41,6 +41,14 @@
 				writew(0x720, VGABASE + 2*(max_xpos*j + i));
 			current_ypos = max_ypos-1;
 		}
+#ifdef CONFIG_KGDB_KDB
+		if (c == '\b') {
+			if (current_xpos > 0)
+				current_xpos--;
+		} else if (c == '\r') {
+			current_xpos = 0;
+		} else
+#endif
 		if (c == '\n') {
 			current_xpos = 0;
 			current_ypos++;
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index b2258ca..4f4af75 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -47,20 +47,8 @@
 #include <asm/debugreg.h>
 #include <asm/apicdef.h>
 #include <asm/system.h>
-
 #include <asm/apic.h>
 
-/*
- * Put the error code here just in case the user cares:
- */
-static int gdb_x86errcode;
-
-/*
- * Likewise, the vector number here (since GDB only gets the signal
- * number through the usual means, and that's not very specific):
- */
-static int gdb_x86vector = -1;
-
 /**
  *	pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs
  *	@gdb_regs: A pointer to hold the registers in the order GDB wants.
@@ -211,6 +199,8 @@
 	struct perf_event	**pev;
 } breakinfo[4];
 
+static unsigned long early_dr7;
+
 static void kgdb_correct_hw_break(void)
 {
 	int breakno;
@@ -222,6 +212,14 @@
 		int cpu = raw_smp_processor_id();
 		if (!breakinfo[breakno].enabled)
 			continue;
+		if (dbg_is_early) {
+			set_debugreg(breakinfo[breakno].addr, breakno);
+			early_dr7 |= encode_dr7(breakno,
+						breakinfo[breakno].len,
+						breakinfo[breakno].type);
+			set_debugreg(early_dr7, 7);
+			continue;
+		}
 		bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
 		info = counter_arch_bp(bp);
 		if (bp->attr.disabled != 1)
@@ -236,7 +234,8 @@
 		if (!val)
 			bp->attr.disabled = 0;
 	}
-	hw_breakpoint_restore();
+	if (!dbg_is_early)
+		hw_breakpoint_restore();
 }
 
 static int hw_break_reserve_slot(int breakno)
@@ -245,6 +244,9 @@
 	int cnt = 0;
 	struct perf_event **pevent;
 
+	if (dbg_is_early)
+		return 0;
+
 	for_each_online_cpu(cpu) {
 		cnt++;
 		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
@@ -270,6 +272,9 @@
 	struct perf_event **pevent;
 	int cpu;
 
+	if (dbg_is_early)
+		return 0;
+
 	for_each_online_cpu(cpu) {
 		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
 		if (dbg_release_bp_slot(*pevent))
@@ -314,7 +319,11 @@
 		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
 		if (bp->attr.disabled == 1)
 			continue;
-		arch_uninstall_hw_breakpoint(bp);
+		if (dbg_is_early)
+			early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
+						 breakinfo[i].type);
+		else
+			arch_uninstall_hw_breakpoint(bp);
 		bp->attr.disabled = 1;
 	}
 }
@@ -391,6 +400,11 @@
 	for (i = 0; i < 4; i++) {
 		if (!breakinfo[i].enabled)
 			continue;
+		if (dbg_is_early) {
+			early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
+						 breakinfo[i].type);
+			continue;
+		}
 		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
 		if (bp->attr.disabled == 1)
 			continue;
@@ -399,23 +413,6 @@
 	}
 }
 
-/**
- *	kgdb_post_primary_code - Save error vector/code numbers.
- *	@regs: Original pt_regs.
- *	@e_vector: Original error vector.
- *	@err_code: Original error code.
- *
- *	This is needed on architectures which support SMP and KGDB.
- *	This function is called after all the slave cpus have been put
- *	to a know spin state and the primary CPU has control over KGDB.
- */
-void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
-{
-	/* primary processor is completely in the debugger */
-	gdb_x86vector = e_vector;
-	gdb_x86errcode = err_code;
-}
-
 #ifdef CONFIG_SMP
 /**
  *	kgdb_roundup_cpus - Get other CPUs into a holding pattern
@@ -567,7 +564,7 @@
 			return NOTIFY_DONE;
 	}
 
-	if (kgdb_handle_exception(args->trapnr, args->signr, args->err, regs))
+	if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
 		return NOTIFY_DONE;
 
 	/* Must touch watchdog before return to normal operation */
@@ -575,6 +572,26 @@
 	return NOTIFY_STOP;
 }
 
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+int kgdb_ll_trap(int cmd, const char *str,
+		 struct pt_regs *regs, long err, int trap, int sig)
+{
+	struct die_args args = {
+		.regs	= regs,
+		.str	= str,
+		.err	= err,
+		.trapnr	= trap,
+		.signr	= sig,
+
+	};
+
+	if (!kgdb_io_module_registered)
+		return NOTIFY_DONE;
+
+	return __kgdb_notify(&args, cmd);
+}
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
 static int
 kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
 {
@@ -605,14 +622,15 @@
  */
 int kgdb_arch_init(void)
 {
+	return register_die_notifier(&kgdb_notifier);
+}
+
+void kgdb_arch_late(void)
+{
 	int i, cpu;
-	int ret;
 	struct perf_event_attr attr;
 	struct perf_event **pevent;
 
-	ret = register_die_notifier(&kgdb_notifier);
-	if (ret != 0)
-		return ret;
 	/*
 	 * Pre-allocate the hw breakpoint structions in the non-atomic
 	 * portion of kgdb because this operation requires mutexs to
@@ -624,12 +642,15 @@
 	attr.bp_type = HW_BREAKPOINT_W;
 	attr.disabled = 1;
 	for (i = 0; i < 4; i++) {
+		if (breakinfo[i].pev)
+			continue;
 		breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL);
 		if (IS_ERR(breakinfo[i].pev)) {
-			printk(KERN_ERR "kgdb: Could not allocate hw breakpoints\n");
+			printk(KERN_ERR "kgdb: Could not allocate hw "
+			       "breakpoints\nDisabling the kernel debugger\n");
 			breakinfo[i].pev = NULL;
 			kgdb_arch_exit();
-			return -1;
+			return;
 		}
 		for_each_online_cpu(cpu) {
 			pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
@@ -640,7 +661,6 @@
 			}
 		}
 	}
-	return ret;
 }
 
 /**
@@ -690,6 +710,11 @@
 	return instruction_pointer(regs);
 }
 
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+	regs->ip = ip;
+}
+
 struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: */
 	.gdb_bpt_instr		= { 0xcc },
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c4851ef..e802989 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -725,6 +725,7 @@
 	/* VMI may relocate the fixmap; do this before touching ioremap area */
 	vmi_init();
 
+	early_trap_init();
 	early_cpu_init();
 	early_ioremap_init();
 
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 02cfb9b..142d70c 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -15,6 +15,7 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
+#include <linux/kgdb.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
@@ -451,6 +452,11 @@
 /* May run on IST stack. */
 dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
 {
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
+			== NOTIFY_STOP)
+		return;
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 #ifdef CONFIG_KPROBES
 	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
 			== NOTIFY_STOP)
@@ -802,6 +808,16 @@
 }
 #endif
 
+/* Set of traps needed for early debugging. */
+void __init early_trap_init(void)
+{
+	set_intr_gate_ist(1, &debug, DEBUG_STACK);
+	/* int3 can be called from all */
+	set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
+	set_intr_gate(14, &page_fault);
+	load_idt(&idt_descr);
+}
+
 void __init trap_init(void)
 {
 	int i;
@@ -815,10 +831,7 @@
 #endif
 
 	set_intr_gate(0, &divide_error);
-	set_intr_gate_ist(1, &debug, DEBUG_STACK);
 	set_intr_gate_ist(2, &nmi, NMI_STACK);
-	/* int3 can be called from all */
-	set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
 	/* int4 can be called from all */
 	set_system_intr_gate(4, &overflow);
 	set_intr_gate(5, &bounds);
@@ -834,7 +847,6 @@
 	set_intr_gate(11, &segment_not_present);
 	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
 	set_intr_gate(13, &general_protection);
-	set_intr_gate(14, &page_fault);
 	set_intr_gate(15, &spurious_interrupt_bug);
 	set_intr_gate(16, &coprocessor_error);
 	set_intr_gate(17, &alignment_check);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 28195c3..532e793 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -997,7 +997,8 @@
 }
 EXPORT_SYMBOL(set_memory_uc);
 
-int set_memory_array_uc(unsigned long *addr, int addrinarray)
+int _set_memory_array(unsigned long *addr, int addrinarray,
+		unsigned long new_type)
 {
 	int i, j;
 	int ret;
@@ -1007,13 +1008,19 @@
 	 */
 	for (i = 0; i < addrinarray; i++) {
 		ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
-					_PAGE_CACHE_UC_MINUS, NULL);
+					new_type, NULL);
 		if (ret)
 			goto out_free;
 	}
 
 	ret = change_page_attr_set(addr, addrinarray,
 				    __pgprot(_PAGE_CACHE_UC_MINUS), 1);
+
+	if (!ret && new_type == _PAGE_CACHE_WC)
+		ret = change_page_attr_set_clr(addr, addrinarray,
+					       __pgprot(_PAGE_CACHE_WC),
+					       __pgprot(_PAGE_CACHE_MASK),
+					       0, CPA_ARRAY, NULL);
 	if (ret)
 		goto out_free;
 
@@ -1025,8 +1032,19 @@
 
 	return ret;
 }
+
+int set_memory_array_uc(unsigned long *addr, int addrinarray)
+{
+	return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS);
+}
 EXPORT_SYMBOL(set_memory_array_uc);
 
+int set_memory_array_wc(unsigned long *addr, int addrinarray)
+{
+	return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC);
+}
+EXPORT_SYMBOL(set_memory_array_wc);
+
 int _set_memory_wc(unsigned long addr, int numpages)
 {
 	int ret;
@@ -1153,26 +1171,34 @@
 }
 EXPORT_SYMBOL(set_pages_uc);
 
-int set_pages_array_uc(struct page **pages, int addrinarray)
+static int _set_pages_array(struct page **pages, int addrinarray,
+		unsigned long new_type)
 {
 	unsigned long start;
 	unsigned long end;
 	int i;
 	int free_idx;
+	int ret;
 
 	for (i = 0; i < addrinarray; i++) {
 		if (PageHighMem(pages[i]))
 			continue;
 		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
-		if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
+		if (reserve_memtype(start, end, new_type, NULL))
 			goto err_out;
 	}
 
-	if (cpa_set_pages_array(pages, addrinarray,
-			__pgprot(_PAGE_CACHE_UC_MINUS)) == 0) {
-		return 0; /* Success */
-	}
+	ret = cpa_set_pages_array(pages, addrinarray,
+			__pgprot(_PAGE_CACHE_UC_MINUS));
+	if (!ret && new_type == _PAGE_CACHE_WC)
+		ret = change_page_attr_set_clr(NULL, addrinarray,
+					       __pgprot(_PAGE_CACHE_WC),
+					       __pgprot(_PAGE_CACHE_MASK),
+					       0, CPA_PAGES_ARRAY, pages);
+	if (ret)
+		goto err_out;
+	return 0; /* Success */
 err_out:
 	free_idx = i;
 	for (i = 0; i < free_idx; i++) {
@@ -1184,8 +1210,19 @@
 	}
 	return -EINVAL;
 }
+
+int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS);
+}
 EXPORT_SYMBOL(set_pages_array_uc);
 
+int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC);
+}
+EXPORT_SYMBOL(set_pages_array_wc);
+
 int set_pages_wb(struct page *page, int numpages)
 {
 	unsigned long addr = (unsigned long)page_address(page);
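
A hypothetical driver-side sketch of the new set_pages_array_wc(); all names below are illustrative, not part of the patch. A batch of pages is marked write-combining for streaming writes and restored to write-back before being freed.

static int example_stream_to_pages(struct page **pages, int count)
{
	int ret;

	ret = set_pages_array_wc(pages, count);
	if (ret)
		return ret;	/* e.g. a conflicting memtype reservation */

	/* ... write-combined streaming access to the pages ... */

	set_pages_array_wb(pages, count);	/* restore before freeing */
	return 0;
}
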
diff --git a/block/Kconfig b/block/Kconfig
index f9e89f4..9be0b56 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -77,29 +77,6 @@
 	T10/SCSI Data Integrity Field or the T13/ATA External Path
 	Protection.  If in doubt, say N.
 
-config BLK_CGROUP
-	tristate "Block cgroup support"
-	depends on CGROUPS
-	depends on CFQ_GROUP_IOSCHED
-	default n
-	---help---
-	Generic block IO controller cgroup interface. This is the common
-	cgroup interface which should be used by various IO controlling
-	policies.
-
-	Currently, CFQ IO scheduler uses it to recognize task groups and
-	control disk bandwidth allocation (proportional time slice allocation)
-	to such task groups.
-
-config DEBUG_BLK_CGROUP
-	bool
-	depends on BLK_CGROUP
-	default n
-	---help---
-	Enable some debugging help. Currently it stores the cgroup path
-	in the blk group which can be used by cfq for tracing various
-	group related activity.
-
 endif # BLOCK
 
 config BLOCK_COMPAT
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index fc71cf07..3199b76 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -23,7 +23,8 @@
 
 config IOSCHED_CFQ
 	tristate "CFQ I/O scheduler"
-	select BLK_CGROUP if CFQ_GROUP_IOSCHED
+	# If BLK_CGROUP is a module, CFQ has to be built as module.
+	depends on (BLK_CGROUP=m && m) || !BLK_CGROUP || BLK_CGROUP=y
 	default y
 	---help---
 	  The CFQ I/O scheduler tries to distribute bandwidth equally
@@ -33,22 +34,15 @@
 
 	  This is the default I/O scheduler.
 
+	  Note: If BLK_CGROUP=m, then CFQ can be built only as module.
+
 config CFQ_GROUP_IOSCHED
 	bool "CFQ Group Scheduling support"
-	depends on IOSCHED_CFQ && CGROUPS
+	depends on IOSCHED_CFQ && BLK_CGROUP
 	default n
 	---help---
 	  Enable group IO scheduling in CFQ.
 
-config DEBUG_CFQ_IOSCHED
-	bool "Debug CFQ Scheduling"
-	depends on CFQ_GROUP_IOSCHED
-	select DEBUG_BLK_CGROUP
-	default n
-	---help---
-	  Enable CFQ IO scheduling debugging in CFQ. Currently it makes
-	  blktrace output more verbose.
-
 choice
 	prompt "Default I/O scheduler"
 	default DEFAULT_CFQ
diff --git a/block/Makefile b/block/Makefile
index cb2d515..0bb499a 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 			blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
 			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
-			blk-iopoll.o ioctl.o genhd.o scsi_ioctl.o
+			blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o
 
 obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
 obj-$(CONFIG_BLK_CGROUP)	+= blk-cgroup.o
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 6d88544..0d710c9 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -286,26 +286,31 @@
 			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
 	}
-
-	complete(bio->bi_private);
+	if (bio->bi_private)
+		complete(bio->bi_private);
+	bio_put(bio);
 }
 
 /**
  * blkdev_issue_flush - queue a flush
  * @bdev:	blockdev to issue flush for
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
  * @error_sector:	error sector
+ * @flags:	BLKDEV_IFL_* flags to control behaviour
  *
  * Description:
  *    Issue a flush for the block device in question. Caller can supply
  *    room for storing the error offset in case of a flush error, if they
- *    wish to.
+ *    wish to. If the WAIT flag is not passed, the caller may only assume
+ *    that the request has been queued internally for later handling.
  */
-int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
+int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
+		sector_t *error_sector, unsigned long flags)
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q;
 	struct bio *bio;
-	int ret;
+	int ret = 0;
 
 	if (bdev->bd_disk == NULL)
 		return -ENXIO;
@@ -314,23 +319,25 @@
 	if (!q)
 		return -ENXIO;
 
-	bio = bio_alloc(GFP_KERNEL, 0);
+	bio = bio_alloc(gfp_mask, 0);
 	bio->bi_end_io = bio_end_empty_barrier;
-	bio->bi_private = &wait;
 	bio->bi_bdev = bdev;
+	if (test_bit(BLKDEV_WAIT, &flags))
+		bio->bi_private = &wait;
+
+	bio_get(bio);
 	submit_bio(WRITE_BARRIER, bio);
+	if (test_bit(BLKDEV_WAIT, &flags)) {
+		wait_for_completion(&wait);
+		/*
+		 * The driver must store the error location in ->bi_sector, if
+		 * it supports it. For non-stacked drivers, this should be
+		 * copied from blk_rq_pos(rq).
+		 */
+		if (error_sector)
+			*error_sector = bio->bi_sector;
+	}
 
-	wait_for_completion(&wait);
-
-	/*
-	 * The driver must store the error location in ->bi_sector, if
-	 * it supports it. For non-stacked drivers, this should be copied
-	 * from blk_rq_pos(rq).
-	 */
-	if (error_sector)
-		*error_sector = bio->bi_sector;
-
-	ret = 0;
 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
 		ret = -EOPNOTSUPP;
 	else if (!bio_flagged(bio, BIO_UPTODATE))
@@ -340,107 +347,3 @@
 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
-
-static void blkdev_discard_end_io(struct bio *bio, int err)
-{
-	if (err) {
-		if (err == -EOPNOTSUPP)
-			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-	}
-
-	if (bio->bi_private)
-		complete(bio->bi_private);
-	__free_page(bio_page(bio));
-
-	bio_put(bio);
-}
-
-/**
- * blkdev_issue_discard - queue a discard
- * @bdev:	blockdev to issue discard for
- * @sector:	start sector
- * @nr_sects:	number of sectors to discard
- * @gfp_mask:	memory allocation flags (for bio_alloc)
- * @flags:	DISCARD_FL_* flags to control behaviour
- *
- * Description:
- *    Issue a discard request for the sectors in question.
- */
-int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, int flags)
-{
-	DECLARE_COMPLETION_ONSTACK(wait);
-	struct request_queue *q = bdev_get_queue(bdev);
-	int type = flags & DISCARD_FL_BARRIER ?
-		DISCARD_BARRIER : DISCARD_NOBARRIER;
-	struct bio *bio;
-	struct page *page;
-	int ret = 0;
-
-	if (!q)
-		return -ENXIO;
-
-	if (!blk_queue_discard(q))
-		return -EOPNOTSUPP;
-
-	while (nr_sects && !ret) {
-		unsigned int sector_size = q->limits.logical_block_size;
-		unsigned int max_discard_sectors =
-			min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio)
-			goto out;
-		bio->bi_sector = sector;
-		bio->bi_end_io = blkdev_discard_end_io;
-		bio->bi_bdev = bdev;
-		if (flags & DISCARD_FL_WAIT)
-			bio->bi_private = &wait;
-
-		/*
-		 * Add a zeroed one-sector payload as that's what
-		 * our current implementations need.  If we'll ever need
-		 * more the interface will need revisiting.
-		 */
-		page = alloc_page(gfp_mask | __GFP_ZERO);
-		if (!page)
-			goto out_free_bio;
-		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
-			goto out_free_page;
-
-		/*
-		 * And override the bio size - the way discard works we
-		 * touch many more blocks on disk than the actual payload
-		 * length.
-		 */
-		if (nr_sects > max_discard_sectors) {
-			bio->bi_size = max_discard_sectors << 9;
-			nr_sects -= max_discard_sectors;
-			sector += max_discard_sectors;
-		} else {
-			bio->bi_size = nr_sects << 9;
-			nr_sects = 0;
-		}
-
-		bio_get(bio);
-		submit_bio(type, bio);
-
-		if (flags & DISCARD_FL_WAIT)
-			wait_for_completion(&wait);
-
-		if (bio_flagged(bio, BIO_EOPNOTSUPP))
-			ret = -EOPNOTSUPP;
-		else if (!bio_flagged(bio, BIO_UPTODATE))
-			ret = -EIO;
-		bio_put(bio);
-	}
-	return ret;
-out_free_page:
-	__free_page(page);
-out_free_bio:
-	bio_put(bio);
-out:
-	return -ENOMEM;
-}
-EXPORT_SYMBOL(blkdev_issue_discard);
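
A hedged usage sketch of the reworked blkdev_issue_flush() signature. BLKDEV_IFL_WAIT is assumed here to be the flag that sets the BLKDEV_WAIT bit tested above; it is defined elsewhere in this series, not in this hunk.

static int example_sync_flush(struct block_device *bdev)
{
	/* Block until the flush completes; no error-sector reporting. */
	return blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
}
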
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 2cc682b..a680964 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -15,8 +15,12 @@
 #include <linux/kdev_t.h>
 #include <linux/module.h>
 #include <linux/err.h>
+#include <linux/blkdev.h>
 #include <linux/slab.h>
 #include "blk-cgroup.h"
+#include <linux/genhd.h>
+
+#define MAX_KEY_LEN 100
 
 static DEFINE_SPINLOCK(blkio_list_lock);
 static LIST_HEAD(blkio_list);
@@ -49,6 +53,32 @@
 };
 EXPORT_SYMBOL_GPL(blkio_subsys);
 
+static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
+					    struct blkio_policy_node *pn)
+{
+	list_add(&pn->node, &blkcg->policy_list);
+}
+
+/* Must be called with blkcg->lock held */
+static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
+{
+	list_del(&pn->node);
+}
+
+/* Must be called with blkcg->lock held */
+static struct blkio_policy_node *
+blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev)
+{
+	struct blkio_policy_node *pn;
+
+	list_for_each_entry(pn, &blkcg->policy_list, node) {
+		if (pn->dev == dev)
+			return pn;
+	}
+
+	return NULL;
+}
+
 struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
 {
 	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
@@ -56,13 +86,259 @@
 }
 EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
 
-void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
-			unsigned long time, unsigned long sectors)
+/*
+ * Add to the appropriate stat variable depending on the request type.
+ * This should be called with the blkg->stats_lock held.
+ */
+static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
+				bool sync)
 {
-	blkg->time += time;
-	blkg->sectors += sectors;
+	if (direction)
+		stat[BLKIO_STAT_WRITE] += add;
+	else
+		stat[BLKIO_STAT_READ] += add;
+	if (sync)
+		stat[BLKIO_STAT_SYNC] += add;
+	else
+		stat[BLKIO_STAT_ASYNC] += add;
 }
-EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_stats);
+
+/*
+ * Decrements the appropriate stat variable depending on the request type.
+ * BUGs if the value is already zero.
+ * This should be called with the blkg->stats_lock held.
+ */
+static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
+{
+	if (direction) {
+		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
+		stat[BLKIO_STAT_WRITE]--;
+	} else {
+		BUG_ON(stat[BLKIO_STAT_READ] == 0);
+		stat[BLKIO_STAT_READ]--;
+	}
+	if (sync) {
+		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
+		stat[BLKIO_STAT_SYNC]--;
+	} else {
+		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
+		stat[BLKIO_STAT_ASYNC]--;
+	}
+}
+
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+/* This should be called with the blkg->stats_lock held. */
+static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
+						struct blkio_group *curr_blkg)
+{
+	if (blkio_blkg_waiting(&blkg->stats))
+		return;
+	if (blkg == curr_blkg)
+		return;
+	blkg->stats.start_group_wait_time = sched_clock();
+	blkio_mark_blkg_waiting(&blkg->stats);
+}
+
+/* This should be called with the blkg->stats_lock held. */
+static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
+{
+	unsigned long long now;
+
+	if (!blkio_blkg_waiting(stats))
+		return;
+
+	now = sched_clock();
+	if (time_after64(now, stats->start_group_wait_time))
+		stats->group_wait_time += now - stats->start_group_wait_time;
+	blkio_clear_blkg_waiting(stats);
+}
+
+/* This should be called with the blkg->stats_lock held. */
+static void blkio_end_empty_time(struct blkio_group_stats *stats)
+{
+	unsigned long long now;
+
+	if (!blkio_blkg_empty(stats))
+		return;
+
+	now = sched_clock();
+	if (time_after64(now, stats->start_empty_time))
+		stats->empty_time += now - stats->start_empty_time;
+	blkio_clear_blkg_empty(stats);
+}
+
+void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	BUG_ON(blkio_blkg_idling(&blkg->stats));
+	blkg->stats.start_idle_time = sched_clock();
+	blkio_mark_blkg_idling(&blkg->stats);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
+
+void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
+{
+	unsigned long flags;
+	unsigned long long now;
+	struct blkio_group_stats *stats;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	stats = &blkg->stats;
+	if (blkio_blkg_idling(stats)) {
+		now = sched_clock();
+		if (time_after64(now, stats->start_idle_time))
+			stats->idle_time += now - stats->start_idle_time;
+		blkio_clear_blkg_idling(stats);
+	}
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
+
+void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
+{
+	unsigned long flags;
+	struct blkio_group_stats *stats;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	stats = &blkg->stats;
+	stats->avg_queue_size_sum +=
+			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
+			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
+	stats->avg_queue_size_samples++;
+	blkio_update_group_wait_time(stats);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
+
+void blkiocg_set_start_empty_time(struct blkio_group *blkg)
+{
+	unsigned long flags;
+	struct blkio_group_stats *stats;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	stats = &blkg->stats;
+
+	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
+			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
+		spin_unlock_irqrestore(&blkg->stats_lock, flags);
+		return;
+	}
+
+	/*
+	 * group is already marked empty. This can happen if cfqq got a new
+	 * request in the parent group and moved to this group while being
+	 * added to the service tree. Just ignore the event and move on.
+	 */
+	if(blkio_blkg_empty(stats)) {
+		spin_unlock_irqrestore(&blkg->stats_lock, flags);
+		return;
+	}
+
+	stats->start_empty_time = sched_clock();
+	blkio_mark_blkg_empty(stats);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
+
+void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+			unsigned long dequeue)
+{
+	blkg->stats.dequeue += dequeue;
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
+#else
+static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
+					struct blkio_group *curr_blkg) {}
+static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
+#endif
+
+void blkiocg_update_io_add_stats(struct blkio_group *blkg,
+			struct blkio_group *curr_blkg, bool direction,
+			bool sync)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
+			sync);
+	blkio_end_empty_time(&blkg->stats);
+	blkio_set_start_group_wait_time(blkg, curr_blkg);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
+
+void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+						bool direction, bool sync)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
+					direction, sync);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
+
+void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	blkg->stats.time += time;
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
+
+void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+				uint64_t bytes, bool direction, bool sync)
+{
+	struct blkio_group_stats *stats;
+	unsigned long flags;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	stats = &blkg->stats;
+	stats->sectors += bytes >> 9;
+	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
+			sync);
+	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
+			direction, sync);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
+
+void blkiocg_update_completion_stats(struct blkio_group *blkg,
+	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
+{
+	struct blkio_group_stats *stats;
+	unsigned long flags;
+	unsigned long long now = sched_clock();
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	stats = &blkg->stats;
+	if (time_after64(now, io_start_time))
+		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
+				now - io_start_time, direction, sync);
+	if (time_after64(io_start_time, start_time))
+		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
+				io_start_time - start_time, direction, sync);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
+
+void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
+					bool sync)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
+			sync);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
 
 void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
 			struct blkio_group *blkg, void *key, dev_t dev)
@@ -70,14 +346,13 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&blkcg->lock, flags);
+	spin_lock_init(&blkg->stats_lock);
 	rcu_assign_pointer(blkg->key, key);
 	blkg->blkcg_id = css_id(&blkcg->css);
 	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
 	spin_unlock_irqrestore(&blkcg->lock, flags);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
 	/* Need to take css reference ? */
 	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
-#endif
 	blkg->dev = dev;
 }
 EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
@@ -101,17 +376,16 @@
 
 	rcu_read_lock();
 	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
-	if (!css)
-		goto out;
-
-	blkcg = container_of(css, struct blkio_cgroup, css);
-	spin_lock_irqsave(&blkcg->lock, flags);
-	if (!hlist_unhashed(&blkg->blkcg_node)) {
-		__blkiocg_del_blkio_group(blkg);
-		ret = 0;
+	if (css) {
+		blkcg = container_of(css, struct blkio_cgroup, css);
+		spin_lock_irqsave(&blkcg->lock, flags);
+		if (!hlist_unhashed(&blkg->blkcg_node)) {
+			__blkiocg_del_blkio_group(blkg);
+			ret = 0;
+		}
+		spin_unlock_irqrestore(&blkcg->lock, flags);
 	}
-	spin_unlock_irqrestore(&blkcg->lock, flags);
-out:
+
 	rcu_read_unlock();
 	return ret;
 }
@@ -154,6 +428,7 @@
 	struct blkio_group *blkg;
 	struct hlist_node *n;
 	struct blkio_policy_type *blkiop;
+	struct blkio_policy_node *pn;
 
 	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
 		return -EINVAL;
@@ -162,7 +437,13 @@
 	spin_lock(&blkio_list_lock);
 	spin_lock_irq(&blkcg->lock);
 	blkcg->weight = (unsigned int)val;
+
 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+		pn = blkio_policy_search_node(blkcg, blkg->dev);
+
+		if (pn)
+			continue;
+
 		list_for_each_entry(blkiop, &blkio_list, list)
 			blkiop->ops.blkio_update_group_weight_fn(blkg,
 					blkcg->weight);
@@ -172,13 +453,154 @@
 	return 0;
 }
 
-#define SHOW_FUNCTION_PER_GROUP(__VAR)					\
+static int
+blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
+{
+	struct blkio_cgroup *blkcg;
+	struct blkio_group *blkg;
+	struct blkio_group_stats *stats;
+	struct hlist_node *n;
+	uint64_t queued[BLKIO_STAT_TOTAL];
+	int i;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	bool idling, waiting, empty;
+	unsigned long long now = sched_clock();
+#endif
+
+	blkcg = cgroup_to_blkio_cgroup(cgroup);
+	spin_lock_irq(&blkcg->lock);
+	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+		spin_lock(&blkg->stats_lock);
+		stats = &blkg->stats;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+		idling = blkio_blkg_idling(stats);
+		waiting = blkio_blkg_waiting(stats);
+		empty = blkio_blkg_empty(stats);
+#endif
+		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
+			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
+		memset(stats, 0, sizeof(struct blkio_group_stats));
+		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
+			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+		if (idling) {
+			blkio_mark_blkg_idling(stats);
+			stats->start_idle_time = now;
+		}
+		if (waiting) {
+			blkio_mark_blkg_waiting(stats);
+			stats->start_group_wait_time = now;
+		}
+		if (empty) {
+			blkio_mark_blkg_empty(stats);
+			stats->start_empty_time = now;
+		}
+#endif
+		spin_unlock(&blkg->stats_lock);
+	}
+	spin_unlock_irq(&blkcg->lock);
+	return 0;
+}
+
+static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
+				int chars_left, bool diskname_only)
+{
+	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
+	chars_left -= strlen(str);
+	if (chars_left <= 0) {
+		printk(KERN_WARNING
+			"Possibly incorrect cgroup stat display format");
+		return;
+	}
+	if (diskname_only)
+		return;
+	switch (type) {
+	case BLKIO_STAT_READ:
+		strlcat(str, " Read", chars_left);
+		break;
+	case BLKIO_STAT_WRITE:
+		strlcat(str, " Write", chars_left);
+		break;
+	case BLKIO_STAT_SYNC:
+		strlcat(str, " Sync", chars_left);
+		break;
+	case BLKIO_STAT_ASYNC:
+		strlcat(str, " Async", chars_left);
+		break;
+	case BLKIO_STAT_TOTAL:
+		strlcat(str, " Total", chars_left);
+		break;
+	default:
+		strlcat(str, " Invalid", chars_left);
+	}
+}
+
+static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
+				struct cgroup_map_cb *cb, dev_t dev)
+{
+	blkio_get_key_name(0, dev, str, chars_left, true);
+	cb->fill(cb, str, val);
+	return val;
+}
+
+/* This should be called with blkg->stats_lock held */
+static uint64_t blkio_get_stat(struct blkio_group *blkg,
+		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
+{
+	uint64_t disk_total;
+	char key_str[MAX_KEY_LEN];
+	enum stat_sub_type sub_type;
+
+	if (type == BLKIO_STAT_TIME)
+		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+					blkg->stats.time, cb, dev);
+	if (type == BLKIO_STAT_SECTORS)
+		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+					blkg->stats.sectors, cb, dev);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
+		uint64_t sum = blkg->stats.avg_queue_size_sum;
+		uint64_t samples = blkg->stats.avg_queue_size_samples;
+		if (samples)
+			do_div(sum, samples);
+		else
+			sum = 0;
+		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
+	}
+	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
+		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+					blkg->stats.group_wait_time, cb, dev);
+	if (type == BLKIO_STAT_IDLE_TIME)
+		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+					blkg->stats.idle_time, cb, dev);
+	if (type == BLKIO_STAT_EMPTY_TIME)
+		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+					blkg->stats.empty_time, cb, dev);
+	if (type == BLKIO_STAT_DEQUEUE)
+		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+					blkg->stats.dequeue, cb, dev);
+#endif
+
+	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
+			sub_type++) {
+		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
+		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
+	}
+	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
+			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
+	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
+	cb->fill(cb, key_str, disk_total);
+	return disk_total;
+}
+
+#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)		\
 static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
-			struct cftype *cftype, struct seq_file *m)	\
+		struct cftype *cftype, struct cgroup_map_cb *cb)	\
 {									\
 	struct blkio_cgroup *blkcg;					\
 	struct blkio_group *blkg;					\
 	struct hlist_node *n;						\
+	uint64_t cgroup_total = 0;					\
 									\
 	if (!cgroup_lock_live_group(cgroup))				\
 		return -ENODEV;						\
@@ -186,50 +608,293 @@
 	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
 	rcu_read_lock();						\
 	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
-		if (blkg->dev)						\
-			seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev),	\
-				 MINOR(blkg->dev), blkg->__VAR);	\
+		if (blkg->dev) {					\
+			spin_lock_irq(&blkg->stats_lock);		\
+			cgroup_total += blkio_get_stat(blkg, cb,	\
+						blkg->dev, type);	\
+			spin_unlock_irq(&blkg->stats_lock);		\
+		}							\
 	}								\
+	if (show_total)							\
+		cb->fill(cb, "Total", cgroup_total);			\
 	rcu_read_unlock();						\
 	cgroup_unlock();						\
 	return 0;							\
 }
 
-SHOW_FUNCTION_PER_GROUP(time);
-SHOW_FUNCTION_PER_GROUP(sectors);
+SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
+SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
+SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
+SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
+SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
+SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
+SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
+SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-SHOW_FUNCTION_PER_GROUP(dequeue);
+SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
+SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
+SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
+SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
+SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
 #endif
 #undef SHOW_FUNCTION_PER_GROUP
 
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
-			unsigned long dequeue)
+static int blkio_check_dev_num(dev_t dev)
 {
-	blkg->dequeue += dequeue;
+	int part = 0;
+	struct gendisk *disk;
+
+	disk = get_gendisk(dev, &part);
+	if (!disk || part)
+		return -ENODEV;
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
-#endif
+
+static int blkio_policy_parse_and_set(char *buf,
+				      struct blkio_policy_node *newpn)
+{
+	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
+	int ret;
+	unsigned long major, minor, temp;
+	int i = 0;
+	dev_t dev;
+
+	memset(s, 0, sizeof(s));
+
+	while ((p = strsep(&buf, " ")) != NULL) {
+		if (!*p)
+			continue;
+
+		s[i++] = p;
+
+		/* Prevent inputting too many things */
+		if (i == 3)
+			break;
+	}
+
+	if (i != 2)
+		return -EINVAL;
+
+	p = strsep(&s[0], ":");
+	if (p != NULL)
+		major_s = p;
+	else
+		return -EINVAL;
+
+	minor_s = s[0];
+	if (!minor_s)
+		return -EINVAL;
+
+	ret = strict_strtoul(major_s, 10, &major);
+	if (ret)
+		return -EINVAL;
+
+	ret = strict_strtoul(minor_s, 10, &minor);
+	if (ret)
+		return -EINVAL;
+
+	dev = MKDEV(major, minor);
+
+	ret = blkio_check_dev_num(dev);
+	if (ret)
+		return ret;
+
+	newpn->dev = dev;
+
+	if (s[1] == NULL)
+		return -EINVAL;
+
+	ret = strict_strtoul(s[1], 10, &temp);
+	if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
+	    temp > BLKIO_WEIGHT_MAX)
+		return -EINVAL;
+
+	newpn->weight =  temp;
+
+	return 0;
+}
+
+unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
+			      dev_t dev)
+{
+	struct blkio_policy_node *pn;
+
+	pn = blkio_policy_search_node(blkcg, dev);
+	if (pn)
+		return pn->weight;
+	else
+		return blkcg->weight;
+}
+EXPORT_SYMBOL_GPL(blkcg_get_weight);
+
+
+static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft,
+				       const char *buffer)
+{
+	int ret = 0;
+	char *buf;
+	struct blkio_policy_node *newpn, *pn;
+	struct blkio_cgroup *blkcg;
+	struct blkio_group *blkg;
+	int keep_newpn = 0;
+	struct hlist_node *n;
+	struct blkio_policy_type *blkiop;
+
+	buf = kstrdup(buffer, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
+	if (!newpn) {
+		ret = -ENOMEM;
+		goto free_buf;
+	}
+
+	ret = blkio_policy_parse_and_set(buf, newpn);
+	if (ret)
+		goto free_newpn;
+
+	blkcg = cgroup_to_blkio_cgroup(cgrp);
+
+	spin_lock_irq(&blkcg->lock);
+
+	pn = blkio_policy_search_node(blkcg, newpn->dev);
+	if (!pn) {
+		if (newpn->weight != 0) {
+			blkio_policy_insert_node(blkcg, newpn);
+			keep_newpn = 1;
+		}
+		spin_unlock_irq(&blkcg->lock);
+		goto update_io_group;
+	}
+
+	if (newpn->weight == 0) {
+		/* weight == 0 means deleting a specific weight */
+		blkio_policy_delete_node(pn);
+		spin_unlock_irq(&blkcg->lock);
+		goto update_io_group;
+	}
+	spin_unlock_irq(&blkcg->lock);
+
+	pn->weight = newpn->weight;
+
+update_io_group:
+	/* update weight for each cfqg */
+	spin_lock(&blkio_list_lock);
+	spin_lock_irq(&blkcg->lock);
+
+	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+		if (newpn->dev == blkg->dev) {
+			list_for_each_entry(blkiop, &blkio_list, list)
+				blkiop->ops.blkio_update_group_weight_fn(blkg,
+							 newpn->weight ?
+							 newpn->weight :
+							 blkcg->weight);
+		}
+	}
+
+	spin_unlock_irq(&blkcg->lock);
+	spin_unlock(&blkio_list_lock);
+
+free_newpn:
+	if (!keep_newpn)
+		kfree(newpn);
+free_buf:
+	kfree(buf);
+	return ret;
+}
+
+static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft,
+				      struct seq_file *m)
+{
+	struct blkio_cgroup *blkcg;
+	struct blkio_policy_node *pn;
+
+	seq_printf(m, "dev\tweight\n");
+
+	blkcg = cgroup_to_blkio_cgroup(cgrp);
+	if (!list_empty(&blkcg->policy_list)) {
+		spin_lock_irq(&blkcg->lock);
+		list_for_each_entry(pn, &blkcg->policy_list, node) {
+			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
+				   MINOR(pn->dev), pn->weight);
+		}
+		spin_unlock_irq(&blkcg->lock);
+	}
+
+	return 0;
+}
 
 struct cftype blkio_files[] = {
 	{
+		.name = "weight_device",
+		.read_seq_string = blkiocg_weight_device_read,
+		.write_string = blkiocg_weight_device_write,
+		.max_write_len = 256,
+	},
+	{
 		.name = "weight",
 		.read_u64 = blkiocg_weight_read,
 		.write_u64 = blkiocg_weight_write,
 	},
 	{
 		.name = "time",
-		.read_seq_string = blkiocg_time_read,
+		.read_map = blkiocg_time_read,
 	},
 	{
 		.name = "sectors",
-		.read_seq_string = blkiocg_sectors_read,
+		.read_map = blkiocg_sectors_read,
+	},
+	{
+		.name = "io_service_bytes",
+		.read_map = blkiocg_io_service_bytes_read,
+	},
+	{
+		.name = "io_serviced",
+		.read_map = blkiocg_io_serviced_read,
+	},
+	{
+		.name = "io_service_time",
+		.read_map = blkiocg_io_service_time_read,
+	},
+	{
+		.name = "io_wait_time",
+		.read_map = blkiocg_io_wait_time_read,
+	},
+	{
+		.name = "io_merged",
+		.read_map = blkiocg_io_merged_read,
+	},
+	{
+		.name = "io_queued",
+		.read_map = blkiocg_io_queued_read,
+	},
+	{
+		.name = "reset_stats",
+		.write_u64 = blkiocg_reset_stats,
 	},
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-       {
+	{
+		.name = "avg_queue_size",
+		.read_map = blkiocg_avg_queue_size_read,
+	},
+	{
+		.name = "group_wait_time",
+		.read_map = blkiocg_group_wait_time_read,
+	},
+	{
+		.name = "idle_time",
+		.read_map = blkiocg_idle_time_read,
+	},
+	{
+		.name = "empty_time",
+		.read_map = blkiocg_empty_time_read,
+	},
+	{
 		.name = "dequeue",
-		.read_seq_string = blkiocg_dequeue_read,
-       },
+		.read_map = blkiocg_dequeue_read,
+	},
 #endif
 };
 
@@ -246,37 +911,42 @@
 	struct blkio_group *blkg;
 	void *key;
 	struct blkio_policy_type *blkiop;
+	struct blkio_policy_node *pn, *pntmp;
 
 	rcu_read_lock();
-remove_entry:
-	spin_lock_irqsave(&blkcg->lock, flags);
+	do {
+		spin_lock_irqsave(&blkcg->lock, flags);
 
-	if (hlist_empty(&blkcg->blkg_list)) {
+		if (hlist_empty(&blkcg->blkg_list)) {
+			spin_unlock_irqrestore(&blkcg->lock, flags);
+			break;
+		}
+
+		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
+					blkcg_node);
+		key = rcu_dereference(blkg->key);
+		__blkiocg_del_blkio_group(blkg);
+
 		spin_unlock_irqrestore(&blkcg->lock, flags);
-		goto done;
+
+		/*
+		 * This blkio_group is being unlinked as the associated cgroup
+		 * is going away. Let all the IO controlling policies know
+		 * about this event. Currently this is a static call to one io
+		 * controlling policy. Once we have more policies in place, we
+		 * need some dynamic registration of callback functions.
+		 */
+		spin_lock(&blkio_list_lock);
+		list_for_each_entry(blkiop, &blkio_list, list)
+			blkiop->ops.blkio_unlink_group_fn(key, blkg);
+		spin_unlock(&blkio_list_lock);
+	} while (1);
+
+	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
+		blkio_policy_delete_node(pn);
+		kfree(pn);
 	}
 
-	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
-				blkcg_node);
-	key = rcu_dereference(blkg->key);
-	__blkiocg_del_blkio_group(blkg);
-
-	spin_unlock_irqrestore(&blkcg->lock, flags);
-
-	/*
-	 * This blkio_group is being unlinked as associated cgroup is going
-	 * away. Let all the IO controlling policies know about this event.
-	 *
-	 * Currently this is static call to one io controlling policy. Once
-	 * we have more policies in place, we need some dynamic registration
-	 * of callback function.
-	 */
-	spin_lock(&blkio_list_lock);
-	list_for_each_entry(blkiop, &blkio_list, list)
-		blkiop->ops.blkio_unlink_group_fn(key, blkg);
-	spin_unlock(&blkio_list_lock);
-	goto remove_entry;
-done:
 	free_css_id(&blkio_subsys, &blkcg->css);
 	rcu_read_unlock();
 	if (blkcg != &blkio_root_cgroup)
@@ -307,6 +977,7 @@
 	spin_lock_init(&blkcg->lock);
 	INIT_HLIST_HEAD(&blkcg->blkg_list);
 
+	INIT_LIST_HEAD(&blkcg->policy_list);
 	return &blkcg->css;
 }
 
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 8ccc204..2b866ec 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -23,11 +23,84 @@
 #define blkio_subsys_id blkio_subsys.subsys_id
 #endif
 
+enum stat_type {
+	/* Total time spent (in ns) between request dispatch to the driver and
+	 * request completion for IOs done by this cgroup. This may not be
+	 * accurate when NCQ is turned on. */
+	BLKIO_STAT_SERVICE_TIME = 0,
+	/* Total bytes transferred */
+	BLKIO_STAT_SERVICE_BYTES,
+	/* Total IOs serviced, post merge */
+	BLKIO_STAT_SERVICED,
+	/* Total time spent waiting in scheduler queue in ns */
+	BLKIO_STAT_WAIT_TIME,
+	/* Number of IOs merged */
+	BLKIO_STAT_MERGED,
+	/* Number of IOs queued up */
+	BLKIO_STAT_QUEUED,
+	/* All the single valued stats go below this */
+	BLKIO_STAT_TIME,
+	BLKIO_STAT_SECTORS,
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	BLKIO_STAT_AVG_QUEUE_SIZE,
+	BLKIO_STAT_IDLE_TIME,
+	BLKIO_STAT_EMPTY_TIME,
+	BLKIO_STAT_GROUP_WAIT_TIME,
+	BLKIO_STAT_DEQUEUE
+#endif
+};
+
+enum stat_sub_type {
+	BLKIO_STAT_READ = 0,
+	BLKIO_STAT_WRITE,
+	BLKIO_STAT_SYNC,
+	BLKIO_STAT_ASYNC,
+	BLKIO_STAT_TOTAL
+};
+
+/* blkg state flags */
+enum blkg_state_flags {
+	BLKG_waiting = 0,
+	BLKG_idling,
+	BLKG_empty,
+};
+
 struct blkio_cgroup {
 	struct cgroup_subsys_state css;
 	unsigned int weight;
 	spinlock_t lock;
 	struct hlist_head blkg_list;
+	struct list_head policy_list; /* list of blkio_policy_node */
+};
+
+struct blkio_group_stats {
+	/* total disk time and nr sectors dispatched by this group */
+	uint64_t time;
+	uint64_t sectors;
+	uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	/* Sum of number of IOs queued across all samples */
+	uint64_t avg_queue_size_sum;
+	/* Count of samples taken for average */
+	uint64_t avg_queue_size_samples;
+	/* How many times this group has been removed from service tree */
+	unsigned long dequeue;
+
+	/* Total time spent waiting for it to be assigned a timeslice. */
+	uint64_t group_wait_time;
+	uint64_t start_group_wait_time;
+
+	/* Time spent idling for this blkio_group */
+	uint64_t idle_time;
+	uint64_t start_idle_time;
+	/*
+	 * Total time during which we have requests queued but do not
+	 * contain the currently active queue.
+	 */
+	uint64_t empty_time;
+	uint64_t start_empty_time;
+	uint16_t flags;
+#endif
 };
 
 struct blkio_group {
@@ -35,20 +108,25 @@
 	void *key;
 	struct hlist_node blkcg_node;
 	unsigned short blkcg_id;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
 	/* Store cgroup path */
 	char path[128];
-	/* How many times this group has been removed from service tree */
-	unsigned long dequeue;
-#endif
 	/* The device MKDEV(major, minor), this group has been created for */
-	dev_t   dev;
+	dev_t dev;
 
-	/* total disk time and nr sectors dispatched by this group */
-	unsigned long time;
-	unsigned long sectors;
+	/* Need to serialize the stats in the case of reset/update */
+	spinlock_t stats_lock;
+	struct blkio_group_stats stats;
 };
 
+struct blkio_policy_node {
+	struct list_head node;
+	dev_t dev;
+	unsigned int weight;
+};
+
+extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
+				     dev_t dev);
+
 typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
 typedef void (blkio_update_group_weight_fn) (struct blkio_group *blkg,
 						unsigned int weight);
@@ -67,6 +145,11 @@
 extern void blkio_policy_register(struct blkio_policy_type *);
 extern void blkio_policy_unregister(struct blkio_policy_type *);
 
+static inline char *blkg_path(struct blkio_group *blkg)
+{
+	return blkg->path;
+}
+
 #else
 
 struct blkio_group {
@@ -78,6 +161,8 @@
 static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
 static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
 
+static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
+
 #endif
 
 #define BLKIO_WEIGHT_MIN	100
@@ -85,16 +170,42 @@
 #define BLKIO_WEIGHT_DEFAULT	500
 
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-static inline char *blkg_path(struct blkio_group *blkg)
-{
-	return blkg->path;
-}
-void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
+void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
+void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
 				unsigned long dequeue);
+void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
+void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
+void blkiocg_set_start_empty_time(struct blkio_group *blkg);
+
+#define BLKG_FLAG_FNS(name)						\
+static inline void blkio_mark_blkg_##name(				\
+		struct blkio_group_stats *stats)			\
+{									\
+	stats->flags |= (1 << BLKG_##name);				\
+}									\
+static inline void blkio_clear_blkg_##name(				\
+		struct blkio_group_stats *stats)			\
+{									\
+	stats->flags &= ~(1 << BLKG_##name);				\
+}									\
+static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
+{									\
+	return (stats->flags & (1 << BLKG_##name)) != 0;		\
+}									\
+
+BLKG_FLAG_FNS(waiting)
+BLKG_FLAG_FNS(idling)
+BLKG_FLAG_FNS(empty)
+#undef BLKG_FLAG_FNS
 #else
-static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
-static inline void blkiocg_update_blkio_group_dequeue_stats(
-			struct blkio_group *blkg, unsigned long dequeue) {}
+static inline void blkiocg_update_avg_queue_size_stats(
+						struct blkio_group *blkg) {}
+static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+						unsigned long dequeue) {}
+static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+{}
+static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
+static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
 #endif
 
 #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
@@ -105,26 +216,43 @@
 extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
 extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
 						void *key);
-void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
-			unsigned long time, unsigned long sectors);
+void blkiocg_update_timeslice_used(struct blkio_group *blkg,
+					unsigned long time);
+void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
+						bool direction, bool sync);
+void blkiocg_update_completion_stats(struct blkio_group *blkg,
+	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
+void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
+					bool sync);
+void blkiocg_update_io_add_stats(struct blkio_group *blkg,
+		struct blkio_group *curr_blkg, bool direction, bool sync);
+void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+					bool direction, bool sync);
 #else
 struct cgroup;
 static inline struct blkio_cgroup *
 cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
 
 static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-			struct blkio_group *blkg, void *key, dev_t dev)
-{
-}
+			struct blkio_group *blkg, void *key, dev_t dev) {}
 
 static inline int
 blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
 
 static inline struct blkio_group *
 blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
-static inline void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
-			unsigned long time, unsigned long sectors)
-{
-}
+static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
+						unsigned long time) {}
+static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+				uint64_t bytes, bool direction, bool sync) {}
+static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
+		uint64_t start_time, uint64_t io_start_time, bool direction,
+		bool sync) {}
+static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+						bool direction, bool sync) {}
+static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
+		struct blkio_group *curr_blkg, bool direction, bool sync) {}
+static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+						bool direction, bool sync) {}
 #endif
 #endif /* _BLK_CGROUP_H */
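The BLKG_FLAG_FNS macro added above stamps out mark/clear/test helpers for each blkg
state bit (waiting, idling, empty). A small self-contained sketch of the same pattern,
using stand-in names rather than the kernel structures:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in types: same shape as BLKG_FLAG_FNS, one macro expanding to
     * set/clear/test helpers per named flag bit. */
    enum demo_flags { DEMO_waiting = 0, DEMO_idling, DEMO_empty };

    struct demo_stats { uint16_t flags; };

    #define DEMO_FLAG_FNS(name)                                          \
    static inline void demo_mark_##name(struct demo_stats *s)            \
    { s->flags |= (1 << DEMO_##name); }                                   \
    static inline void demo_clear_##name(struct demo_stats *s)           \
    { s->flags &= ~(1 << DEMO_##name); }                                  \
    static inline int demo_is_##name(struct demo_stats *s)               \
    { return (s->flags & (1 << DEMO_##name)) != 0; }

    DEMO_FLAG_FNS(waiting)
    DEMO_FLAG_FNS(idling)
    DEMO_FLAG_FNS(empty)
    #undef DEMO_FLAG_FNS

    int main(void)
    {
            struct demo_stats st = { 0 };

            demo_mark_idling(&st);
            printf("idling=%d waiting=%d\n",
                   demo_is_idling(&st), demo_is_waiting(&st));
            demo_clear_idling(&st);
            return 0;
    }
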
diff --git a/block/blk-core.c b/block/blk-core.c
index 9fe174d..3bc5579 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -127,6 +127,7 @@
 	rq->tag = -1;
 	rq->ref_count = 1;
 	rq->start_time = jiffies;
+	set_start_time_ns(rq);
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -450,6 +451,7 @@
 	 */
 	blk_sync_queue(q);
 
+	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
 	mutex_lock(&q->sysfs_lock);
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 	mutex_unlock(&q->sysfs_lock);
@@ -510,6 +512,8 @@
 		return NULL;
 	}
 
+	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+		    laptop_mode_timer_fn, (unsigned long) q);
 	init_timer(&q->unplug_timer);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->timeout_list);
@@ -568,6 +572,22 @@
 {
 	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
 
+	return blk_init_allocated_queue_node(q, rfn, lock, node_id);
+}
+EXPORT_SYMBOL(blk_init_queue_node);
+
+struct request_queue *
+blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+			 spinlock_t *lock)
+{
+	return blk_init_allocated_queue_node(q, rfn, lock, -1);
+}
+EXPORT_SYMBOL(blk_init_allocated_queue);
+
+struct request_queue *
+blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
+			      spinlock_t *lock, int node_id)
+{
 	if (!q)
 		return NULL;
 
@@ -601,7 +621,7 @@
 	blk_put_queue(q);
 	return NULL;
 }
-EXPORT_SYMBOL(blk_init_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue_node);
 
 int blk_get_queue(struct request_queue *q)
 {
@@ -1198,6 +1218,7 @@
 		if (!blk_rq_cpu_valid(req))
 			req->cpu = bio->bi_comp_cpu;
 		drive_stat_acct(req, 0);
+		elv_bio_merged(q, req, bio);
 		if (!attempt_back_merge(q, req))
 			elv_merged_request(q, req, el_ret);
 		goto out;
@@ -1231,6 +1252,7 @@
 		if (!blk_rq_cpu_valid(req))
 			req->cpu = bio->bi_comp_cpu;
 		drive_stat_acct(req, 0);
+		elv_bio_merged(q, req, bio);
 		if (!attempt_front_merge(q, req))
 			elv_merged_request(q, req, el_ret);
 		goto out;
@@ -1855,8 +1877,10 @@
 	 * and to it is freed is accounted as io that is in progress at
 	 * the driver side.
 	 */
-	if (blk_account_rq(rq))
+	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]++;
+		set_io_start_time_ns(rq);
+	}
 }
 
 /**
@@ -2098,7 +2122,7 @@
 	BUG_ON(blk_queued_rq(req));
 
 	if (unlikely(laptop_mode) && blk_fs_request(req))
-		laptop_io_completion();
+		laptop_io_completion(&req->q->backing_dev_info);
 
 	blk_delete_timer(req);
 
@@ -2517,4 +2541,3 @@
 
 	return 0;
 }
-
diff --git a/block/blk-lib.c b/block/blk-lib.c
new file mode 100644
index 0000000..d0216b9
--- /dev/null
+++ b/block/blk-lib.c
@@ -0,0 +1,233 @@
+/*
+ * Functions related to generic helper functions
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+
+#include "blk.h"
+
+static void blkdev_discard_end_io(struct bio *bio, int err)
+{
+	if (err) {
+		if (err == -EOPNOTSUPP)
+			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+	}
+
+	if (bio->bi_private)
+		complete(bio->bi_private);
+	__free_page(bio_page(bio));
+
+	bio_put(bio);
+}
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev:	blockdev to issue discard for
+ * @sector:	start sector
+ * @nr_sects:	number of sectors to discard
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
+ * @flags:	BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ *    Issue a discard request for the sectors in question.
+ */
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+	DECLARE_COMPLETION_ONSTACK(wait);
+	struct request_queue *q = bdev_get_queue(bdev);
+	int type = flags & BLKDEV_IFL_BARRIER ?
+		DISCARD_BARRIER : DISCARD_NOBARRIER;
+	struct bio *bio;
+	struct page *page;
+	int ret = 0;
+
+	if (!q)
+		return -ENXIO;
+
+	if (!blk_queue_discard(q))
+		return -EOPNOTSUPP;
+
+	while (nr_sects && !ret) {
+		unsigned int sector_size = q->limits.logical_block_size;
+		unsigned int max_discard_sectors =
+			min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+
+		bio = bio_alloc(gfp_mask, 1);
+		if (!bio)
+			goto out;
+		bio->bi_sector = sector;
+		bio->bi_end_io = blkdev_discard_end_io;
+		bio->bi_bdev = bdev;
+		if (flags & BLKDEV_IFL_WAIT)
+			bio->bi_private = &wait;
+
+		/*
+		 * Add a zeroed one-sector payload as that's what
+		 * our current implementations need. If we ever need
+		 * more, the interface will need revisiting.
+		 */
+		page = alloc_page(gfp_mask | __GFP_ZERO);
+		if (!page)
+			goto out_free_bio;
+		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
+			goto out_free_page;
+
+		/*
+		 * And override the bio size - the way discard works we
+		 * touch many more blocks on disk than the actual payload
+		 * length.
+		 */
+		if (nr_sects > max_discard_sectors) {
+			bio->bi_size = max_discard_sectors << 9;
+			nr_sects -= max_discard_sectors;
+			sector += max_discard_sectors;
+		} else {
+			bio->bi_size = nr_sects << 9;
+			nr_sects = 0;
+		}
+
+		bio_get(bio);
+		submit_bio(type, bio);
+
+		if (flags & BLKDEV_IFL_WAIT)
+			wait_for_completion(&wait);
+
+		if (bio_flagged(bio, BIO_EOPNOTSUPP))
+			ret = -EOPNOTSUPP;
+		else if (!bio_flagged(bio, BIO_UPTODATE))
+			ret = -EIO;
+		bio_put(bio);
+	}
+	return ret;
+out_free_page:
+	__free_page(page);
+out_free_bio:
+	bio_put(bio);
+out:
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(blkdev_issue_discard);
+
+struct bio_batch
+{
+	atomic_t 		done;
+	unsigned long 		flags;
+	struct completion 	*wait;
+	bio_end_io_t		*end_io;
+};
+
+static void bio_batch_end_io(struct bio *bio, int err)
+{
+	struct bio_batch *bb = bio->bi_private;
+
+	if (err) {
+		if (err == -EOPNOTSUPP)
+			set_bit(BIO_EOPNOTSUPP, &bb->flags);
+		else
+			clear_bit(BIO_UPTODATE, &bb->flags);
+	}
+	if (bb) {
+		if (bb->end_io)
+			bb->end_io(bio, err);
+		atomic_inc(&bb->done);
+		complete(bb->wait);
+	}
+	bio_put(bio);
+}
+
+/**
+ * blkdev_issue_zeroout - generate a number of zero-filled write bios
+ * @bdev:	blockdev to issue
+ * @sector:	start sector
+ * @nr_sects:	number of sectors to write
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
+ * @flags:	BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ *  Generate and issue a number of bios with zero-filled pages.
+ *  Send a barrier at the beginning and at the end if requested; this guarantees
+ *  correct request ordering. An empty barrier allows us to avoid a post-queue flush.
+ */
+
+int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+	int ret = 0;
+	struct bio *bio;
+	struct bio_batch bb;
+	unsigned int sz, issued = 0;
+	DECLARE_COMPLETION_ONSTACK(wait);
+
+	atomic_set(&bb.done, 0);
+	bb.flags = 1 << BIO_UPTODATE;
+	bb.wait = &wait;
+	bb.end_io = NULL;
+
+	if (flags & BLKDEV_IFL_BARRIER) {
+		/* issue async barrier before the data */
+		ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
+		if (ret)
+			return ret;
+	}
+submit:
+	while (nr_sects != 0) {
+		bio = bio_alloc(gfp_mask,
+				min(nr_sects, (sector_t)BIO_MAX_PAGES));
+		if (!bio)
+			break;
+
+		bio->bi_sector = sector;
+		bio->bi_bdev   = bdev;
+		bio->bi_end_io = bio_batch_end_io;
+		if (flags & BLKDEV_IFL_WAIT)
+			bio->bi_private = &bb;
+
+		while (nr_sects != 0) {
+			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
+			if (sz == 0)
+				/* bio has maximum size possible */
+				break;
+			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
+			nr_sects -= ret >> 9;
+			sector += ret >> 9;
+			if (ret < (sz << 9))
+				break;
+		}
+		issued++;
+		submit_bio(WRITE, bio);
+	}
+	/*
+	 * When all data bios are in flight, send the final barrier if requested.
+	 */
+	if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
+		ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
+					flags & BLKDEV_IFL_WAIT);
+
+
+	if (flags & BLKDEV_IFL_WAIT)
+		/* Wait for bios in-flight */
+		while ( issued != atomic_read(&bb.done))
+			wait_for_completion(&wait);
+
+	if (!test_bit(BIO_UPTODATE, &bb.flags))
+		/* One of the bios in the batch completed with an error. */
+		ret = -EIO;
+
+	if (ret)
+		goto out;
+
+	if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+	if (nr_sects != 0)
+		goto submit;
+out:
+	return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_zeroout);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5f127cf..ed897b5 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -55,6 +55,7 @@
 #define RQ_CIC(rq)		\
 	((struct cfq_io_context *) (rq)->elevator_private)
 #define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)
+#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elevator_private3)
 
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
@@ -143,8 +144,6 @@
 	struct cfq_queue *new_cfqq;
 	struct cfq_group *cfqg;
 	struct cfq_group *orig_cfqg;
-	/* Sectors dispatched in current dispatch round */
-	unsigned long nr_sectors;
 };
 
 /*
@@ -346,7 +345,7 @@
 CFQ_CFQQ_FNS(wait_busy);
 #undef CFQ_CFQQ_FNS
 
-#ifdef CONFIG_DEBUG_CFQ_IOSCHED
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
 	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
 			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
@@ -858,7 +857,7 @@
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
 	cfqg->saved_workload_slice = 0;
-	blkiocg_update_blkio_group_dequeue_stats(&cfqg->blkg, 1);
+	blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
@@ -884,8 +883,7 @@
 			slice_used = cfqq->allocated_slice;
 	}
 
-	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u sect=%lu", slice_used,
-				cfqq->nr_sectors);
+	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
 	return slice_used;
 }
 
@@ -919,8 +917,8 @@
 
 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
 					st->min_vdisktime);
-	blkiocg_update_blkio_group_stats(&cfqg->blkg, used_sl,
-						cfqq->nr_sectors);
+	blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
+	blkiocg_set_start_empty_time(&cfqg->blkg);
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
@@ -961,7 +959,6 @@
 	if (!cfqg)
 		goto done;
 
-	cfqg->weight = blkcg->weight;
 	for_each_cfqg_st(cfqg, i, j, st)
 		*st = CFQ_RB_ROOT;
 	RB_CLEAR_NODE(&cfqg->rb_node);
@@ -978,6 +975,7 @@
 	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
 	blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
 					MKDEV(major, minor));
+	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
 	/* Add group on cfqd list */
 	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
@@ -1004,6 +1002,12 @@
 	return cfqg;
 }
 
+static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
+{
+	atomic_inc(&cfqg->ref);
+	return cfqg;
+}
+
 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 {
 	/* Currently, all async queues are mapped to root group */
@@ -1087,6 +1091,12 @@
 {
 	return &cfqd->root_group;
 }
+
+static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
+{
+	return cfqg;
+}
+
 static inline void
 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
 	cfqq->cfqg = cfqg;
@@ -1389,7 +1399,12 @@
 {
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
+	blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
+						rq_is_sync(rq));
 	cfq_add_rq_rb(rq);
+	blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
+			rq_is_sync(rq));
 }
 
 static struct request *
@@ -1445,6 +1460,8 @@
 	cfq_del_rq_rb(rq);
 
 	cfqq->cfqd->rq_queued--;
+	blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
+						rq_is_sync(rq));
 	if (rq_is_meta(rq)) {
 		WARN_ON(!cfqq->meta_pending);
 		cfqq->meta_pending--;
@@ -1476,6 +1493,13 @@
 	}
 }
 
+static void cfq_bio_merged(struct request_queue *q, struct request *req,
+				struct bio *bio)
+{
+	blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg, bio_data_dir(bio),
+					cfq_bio_sync(bio));
+}
+
 static void
 cfq_merged_requests(struct request_queue *q, struct request *rq,
 		    struct request *next)
@@ -1493,6 +1517,8 @@
 	if (cfqq->next_rq == next)
 		cfqq->next_rq = rq;
 	cfq_remove_request(next);
+	blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next),
+					rq_is_sync(next));
 }
 
 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -1520,18 +1546,24 @@
 	return cfqq == RQ_CFQQ(rq);
 }
 
+static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	del_timer(&cfqd->idle_slice_timer);
+	blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+}
+
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
 				   struct cfq_queue *cfqq)
 {
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
 				cfqd->serving_prio, cfqd->serving_type);
+		blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
 		cfqq->allocated_slice = 0;
 		cfqq->slice_end = 0;
 		cfqq->slice_dispatch = 0;
-		cfqq->nr_sectors = 0;
 
 		cfq_clear_cfqq_wait_request(cfqq);
 		cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1539,7 +1571,7 @@
 		cfq_clear_cfqq_fifo_expire(cfqq);
 		cfq_mark_cfqq_slice_new(cfqq);
 
-		del_timer(&cfqd->idle_slice_timer);
+		cfq_del_timer(cfqd, cfqq);
 	}
 
 	cfqd->active_queue = cfqq;
@@ -1555,7 +1587,7 @@
 	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
 
 	if (cfq_cfqq_wait_request(cfqq))
-		del_timer(&cfqd->idle_slice_timer);
+		cfq_del_timer(cfqd, cfqq);
 
 	cfq_clear_cfqq_wait_request(cfqq);
 	cfq_clear_cfqq_wait_busy(cfqq);
@@ -1857,6 +1889,7 @@
 	sl = cfqd->cfq_slice_idle;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
+	blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
 	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
 }
 
@@ -1876,7 +1909,8 @@
 	elv_dispatch_sort(q, rq);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
-	cfqq->nr_sectors += blk_rq_sectors(rq);
+	blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
+					rq_data_dir(rq), rq_is_sync(rq));
 }
 
 /*
@@ -3185,11 +3219,14 @@
 		if (cfq_cfqq_wait_request(cfqq)) {
 			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
 			    cfqd->busy_queues > 1) {
-				del_timer(&cfqd->idle_slice_timer);
+				cfq_del_timer(cfqd, cfqq);
 				cfq_clear_cfqq_wait_request(cfqq);
 				__blk_run_queue(cfqd->queue);
-			} else
+			} else {
+				blkiocg_update_idle_time_stats(
+						&cfqq->cfqg->blkg);
 				cfq_mark_cfqq_must_dispatch(cfqq);
+			}
 		}
 	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
 		/*
@@ -3214,7 +3251,9 @@
 	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 	cfq_add_rq_rb(rq);
-
+	blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+			&cfqd->serving_group->blkg, rq_data_dir(rq),
+			rq_is_sync(rq));
 	cfq_rq_enqueued(cfqd, cfqq, rq);
 }
 
@@ -3300,6 +3339,9 @@
 	WARN_ON(!cfqq->dispatched);
 	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
+	blkiocg_update_completion_stats(&cfqq->cfqg->blkg, rq_start_time_ns(rq),
+			rq_io_start_time_ns(rq), rq_data_dir(rq),
+			rq_is_sync(rq));
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
@@ -3440,6 +3482,10 @@
 		rq->elevator_private = NULL;
 		rq->elevator_private2 = NULL;
 
+		/* Put down rq reference on cfqg */
+		cfq_put_cfqg(RQ_CFQG(rq));
+		rq->elevator_private3 = NULL;
+
 		cfq_put_queue(cfqq);
 	}
 }
@@ -3528,6 +3574,7 @@
 
 	rq->elevator_private = cic;
 	rq->elevator_private2 = cfqq;
+	rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
 	return 0;
 
 queue_fail:
@@ -3743,7 +3790,6 @@
 	 * second, in order to have larger depth for async operations.
 	 */
 	cfqd->last_delayed_sync = jiffies - HZ;
-	INIT_RCU_HEAD(&cfqd->rcu);
 	return cfqd;
 }
 
@@ -3872,6 +3918,7 @@
 		.elevator_merged_fn =		cfq_merged_request,
 		.elevator_merge_req_fn =	cfq_merged_requests,
 		.elevator_allow_merge_fn =	cfq_allow_merge,
+		.elevator_bio_merged_fn =	cfq_bio_merged,
 		.elevator_dispatch_fn =		cfq_dispatch_requests,
 		.elevator_add_req_fn =		cfq_insert_request,
 		.elevator_activate_req_fn =	cfq_activate_request,
diff --git a/block/elevator.c b/block/elevator.c
index 76e3702..6df2b50 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -539,6 +539,15 @@
 	q->last_merge = rq;
 }
 
+void elv_bio_merged(struct request_queue *q, struct request *rq,
+			struct bio *bio)
+{
+	struct elevator_queue *e = q->elevator;
+
+	if (e->ops->elevator_bio_merged_fn)
+		e->ops->elevator_bio_merged_fn(q, rq, bio);
+}
+
 void elv_requeue_request(struct request_queue *q, struct request *rq)
 {
 	/*
@@ -921,6 +930,7 @@
 	}
 	return error;
 }
+EXPORT_SYMBOL(elv_register_queue);
 
 static void __elv_unregister_queue(struct elevator_queue *e)
 {
@@ -933,6 +943,7 @@
 	if (q)
 		__elv_unregister_queue(q->elevator);
 }
+EXPORT_SYMBOL(elv_unregister_queue);
 
 void elv_register(struct elevator_type *e)
 {
diff --git a/block/genhd.c b/block/genhd.c
index d13ba76..59a2db6 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -596,6 +596,7 @@
 
 	return disk;
 }
+EXPORT_SYMBOL(get_gendisk);
 
 /**
  * bdget_disk - do bdget() by gendisk and partition number
@@ -987,7 +988,6 @@
 	if (!new_ptbl)
 		return -ENOMEM;
 
-	INIT_RCU_HEAD(&new_ptbl->rcu_head);
 	new_ptbl->len = target;
 
 	for (i = 0; i < len; i++)
diff --git a/block/ioctl.c b/block/ioctl.c
index 8905d2a..e8eb679 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -126,7 +126,7 @@
 	if (start + len > (bdev->bd_inode->i_size >> 9))
 		return -EINVAL;
 	return blkdev_issue_discard(bdev, start, len, GFP_KERNEL,
-				    DISCARD_FL_WAIT);
+				    BLKDEV_IFL_WAIT);
 }
 
 static int put_ushort(unsigned long arg, unsigned short val)
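The hunk above moves blk_ioctl_discard() to the new BLKDEV_IFL_WAIT flag; userspace
reaches this path through the BLKDISCARD ioctl. A hedged sketch of such a caller
follows; the device path and byte range are placeholders, not values taken from the
patch.

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>            /* BLKDISCARD */

    /* Placeholder device and range; the kernel side of this ioctl ends up
     * in blkdev_issue_discard() via blk_ioctl_discard(). */
    int main(void)
    {
            uint64_t range[2] = { 0, 1024 * 1024 };  /* byte offset, byte length */
            int fd = open("/dev/sdX", O_WRONLY);     /* placeholder device node */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, BLKDISCARD, &range) < 0)
                    perror("BLKDISCARD");
            close(fd);
            return 0;
    }
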
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index fe980da..98a6610 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -24,10 +24,287 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 
+#include <crypto/scatterwalk.h>
+
 #include "internal.h"
 
 static const char *skcipher_default_geniv __read_mostly;
 
+struct ablkcipher_buffer {
+	struct list_head	entry;
+	struct scatter_walk	dst;
+	unsigned int		len;
+	void			*data;
+};
+
+enum {
+	ABLKCIPHER_WALK_SLOW = 1 << 0,
+};
+
+static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
+{
+	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
+}
+
+void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
+{
+	struct ablkcipher_buffer *p, *tmp;
+
+	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
+		ablkcipher_buffer_write(p);
+		list_del(&p->entry);
+		kfree(p);
+	}
+}
+EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
+
+static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
+					  struct ablkcipher_buffer *p)
+{
+	p->dst = walk->out;
+	list_add_tail(&p->entry, &walk->buffers);
+}
+
+/* Get a spot of the specified length that does not straddle a page.
+ * The caller needs to ensure that there is enough space for this operation.
+ */
+static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
+{
+	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+	return max(start, end_page);
+}
+
+static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
+						unsigned int bsize)
+{
+	unsigned int n = bsize;
+
+	for (;;) {
+		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
+
+		if (len_this_page > n)
+			len_this_page = n;
+		scatterwalk_advance(&walk->out, n);
+		if (n == len_this_page)
+			break;
+		n -= len_this_page;
+		scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
+	}
+
+	return bsize;
+}
+
+static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
+						unsigned int n)
+{
+	scatterwalk_advance(&walk->in, n);
+	scatterwalk_advance(&walk->out, n);
+
+	return n;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+				struct ablkcipher_walk *walk);
+
+int ablkcipher_walk_done(struct ablkcipher_request *req,
+			 struct ablkcipher_walk *walk, int err)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	unsigned int nbytes = 0;
+
+	if (likely(err >= 0)) {
+		unsigned int n = walk->nbytes - err;
+
+		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
+			n = ablkcipher_done_fast(walk, n);
+		else if (WARN_ON(err)) {
+			err = -EINVAL;
+			goto err;
+		} else
+			n = ablkcipher_done_slow(walk, n);
+
+		nbytes = walk->total - n;
+		err = 0;
+	}
+
+	scatterwalk_done(&walk->in, 0, nbytes);
+	scatterwalk_done(&walk->out, 1, nbytes);
+
+err:
+	walk->total = nbytes;
+	walk->nbytes = nbytes;
+
+	if (nbytes) {
+		crypto_yield(req->base.flags);
+		return ablkcipher_walk_next(req, walk);
+	}
+
+	if (walk->iv != req->info)
+		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
+	if (walk->iv_buffer)
+		kfree(walk->iv_buffer);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
+
+static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
+				       struct ablkcipher_walk *walk,
+				       unsigned int bsize,
+				       unsigned int alignmask,
+				       void **src_p, void **dst_p)
+{
+	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
+	struct ablkcipher_buffer *p;
+	void *src, *dst, *base;
+	unsigned int n;
+
+	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
+	n += (aligned_bsize * 3 - (alignmask + 1) +
+	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));
+
+	p = kmalloc(n, GFP_ATOMIC);
+	if (!p)
+		ablkcipher_walk_done(req, walk, -ENOMEM);
+
+	base = p + 1;
+
+	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
+	src = dst = ablkcipher_get_spot(dst, bsize);
+
+	p->len = bsize;
+	p->data = dst;
+
+	scatterwalk_copychunks(src, &walk->in, bsize, 0);
+
+	ablkcipher_queue_write(walk, p);
+
+	walk->nbytes = bsize;
+	walk->flags |= ABLKCIPHER_WALK_SLOW;
+
+	*src_p = src;
+	*dst_p = dst;
+
+	return 0;
+}
+
+static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
+				     struct crypto_tfm *tfm,
+				     unsigned int alignmask)
+{
+	unsigned bs = walk->blocksize;
+	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
+	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
+	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
+			    (alignmask + 1);
+	u8 *iv;
+
+	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
+	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
+	if (!walk->iv_buffer)
+		return -ENOMEM;
+
+	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
+	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+	iv = ablkcipher_get_spot(iv, ivsize);
+
+	walk->iv = memcpy(iv, walk->iv, ivsize);
+	return 0;
+}
+
+static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
+				       struct ablkcipher_walk *walk)
+{
+	walk->src.page = scatterwalk_page(&walk->in);
+	walk->src.offset = offset_in_page(walk->in.offset);
+	walk->dst.page = scatterwalk_page(&walk->out);
+	walk->dst.offset = offset_in_page(walk->out.offset);
+
+	return 0;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+				struct ablkcipher_walk *walk)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	unsigned int alignmask, bsize, n;
+	void *src, *dst;
+	int err;
+
+	alignmask = crypto_tfm_alg_alignmask(tfm);
+	n = walk->total;
+	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
+		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+		return ablkcipher_walk_done(req, walk, -EINVAL);
+	}
+
+	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
+	src = dst = NULL;
+
+	bsize = min(walk->blocksize, n);
+	n = scatterwalk_clamp(&walk->in, n);
+	n = scatterwalk_clamp(&walk->out, n);
+
+	if (n < bsize ||
+	    !scatterwalk_aligned(&walk->in, alignmask) ||
+	    !scatterwalk_aligned(&walk->out, alignmask)) {
+		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
+					   &src, &dst);
+		goto set_phys_lowmem;
+	}
+
+	walk->nbytes = n;
+
+	return ablkcipher_next_fast(req, walk);
+
+set_phys_lowmem:
+	if (err >= 0) {
+		walk->src.page = virt_to_page(src);
+		walk->dst.page = virt_to_page(dst);
+		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
+		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
+	}
+
+	return err;
+}
+
+static int ablkcipher_walk_first(struct ablkcipher_request *req,
+				 struct ablkcipher_walk *walk)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	unsigned int alignmask;
+
+	alignmask = crypto_tfm_alg_alignmask(tfm);
+	if (WARN_ON_ONCE(in_irq()))
+		return -EDEADLK;
+
+	walk->nbytes = walk->total;
+	if (unlikely(!walk->total))
+		return 0;
+
+	walk->iv_buffer = NULL;
+	walk->iv = req->info;
+	if (unlikely(((unsigned long)walk->iv & alignmask))) {
+		int err = ablkcipher_copy_iv(walk, tfm, alignmask);
+		if (err)
+			return err;
+	}
+
+	scatterwalk_start(&walk->in, walk->in.sg);
+	scatterwalk_start(&walk->out, walk->out.sg);
+
+	return ablkcipher_walk_next(req, walk);
+}
+
+int ablkcipher_walk_phys(struct ablkcipher_request *req,
+			 struct ablkcipher_walk *walk)
+{
+	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
+	return ablkcipher_walk_first(req, walk);
+}
+EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
+
 static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
 			    unsigned int keylen)
 {
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 76fae27..c3cf1a6 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -544,7 +544,7 @@
 {
 	int err = -EINVAL;
 
-	if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset)
+	if ((alg->cra_flags ^ frontend->type) & frontend->maskset)
 		goto out;
 
 	spawn->frontend = frontend;
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 05eb32e..b9884ee 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -181,6 +181,7 @@
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
 	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+	unsigned int cryptlen = req->cryptlen;
 
 	if (err)
 		goto out;
@@ -196,6 +197,7 @@
 		goto out;
 
 	authsize = crypto_aead_authsize(authenc);
+	cryptlen -= authsize;
 	ihash = ahreq->result + authsize;
 	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
 				 authsize, 0);
@@ -209,7 +211,7 @@
 	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
 					req->base.complete, req->base.data);
 	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-				     req->cryptlen, req->iv);
+				     cryptlen, req->iv);
 
 	err = crypto_ablkcipher_decrypt(abreq);
 
@@ -228,11 +230,13 @@
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
 	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+	unsigned int cryptlen = req->cryptlen;
 
 	if (err)
 		goto out;
 
 	authsize = crypto_aead_authsize(authenc);
+	cryptlen -= authsize;
 	ihash = ahreq->result + authsize;
 	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
 				 authsize, 0);
@@ -246,7 +250,7 @@
 	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
 					req->base.complete, req->base.data);
 	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-				     req->cryptlen, req->iv);
+				     cryptlen, req->iv);
 
 	err = crypto_ablkcipher_decrypt(abreq);
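The decrypt-path hunks above exist because req->cryptlen on an AEAD decryption covers the ciphertext plus the appended ICV, so only cryptlen - authsize bytes may be handed to the block cipher. A worked example with an assumed 20-byte HMAC-SHA1 tag:

/* authenc(hmac(sha1),cbc(aes)) decryption, illustrative numbers:
 *
 *   req->cryptlen            = 1044   ciphertext + ICV, as submitted
 *   authsize                 =   20   crypto_aead_authsize(authenc)
 *   bytes passed to decrypt  = 1044 - 20 = 1024
 *
 * Without the fix the full 1044 bytes went to crypto_ablkcipher_decrypt(),
 * so the ICV at the tail was decrypted as if it were payload.
 */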
 
diff --git a/crypto/internal.h b/crypto/internal.h
index 2d22636..d4384b0 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -6,7 +6,7 @@
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option) 
+ * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  *
  */
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 8020124..247178c 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -315,16 +315,13 @@
 	goto out;
 }
 
-static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb)
+static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
+						 u32 type, u32 mask)
 {
 	struct crypto_instance *inst;
 	struct crypto_alg *alg;
-	struct crypto_attr_type *algt;
 
-	algt = crypto_get_attr_type(tb);
-
-	alg = crypto_get_attr_alg(tb, algt->type,
-				  (algt->mask & CRYPTO_ALG_TYPE_MASK));
+	alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
 	if (IS_ERR(alg))
 		return ERR_CAST(alg);
 
@@ -365,7 +362,7 @@
 
 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_AEAD:
-		return pcrypt_alloc_aead(tb);
+		return pcrypt_alloc_aead(tb, algt->type, algt->mask);
 	}
 
 	return ERR_PTR(-EINVAL);
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 3de89a4..41e529a 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -68,7 +68,7 @@
 
 void scatterwalk_done(struct scatter_walk *walk, int out, int more)
 {
-	if (!offset_in_page(walk->offset) || !more)
+	if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
 		scatterwalk_pagedone(walk, out, more);
 }
 EXPORT_SYMBOL_GPL(scatterwalk_done);
diff --git a/crypto/shash.c b/crypto/shash.c
index 91f7b9d..22fd943 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -37,7 +37,7 @@
 	u8 *buffer, *alignbuffer;
 	int err;
 
-	absize = keylen + (alignmask & ~(CRYPTO_MINALIGN - 1));
+	absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
 	buffer = kmalloc(absize, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index a351599..3ca68f9 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -394,6 +394,17 @@
 	return 0;
 }
 
+static void test_hash_sg_init(struct scatterlist *sg)
+{
+	int i;
+
+	sg_init_table(sg, TVMEMSIZE);
+	for (i = 0; i < TVMEMSIZE; i++) {
+		sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
+		memset(tvmem[i], 0xff, PAGE_SIZE);
+	}
+}
+
 static void test_hash_speed(const char *algo, unsigned int sec,
 			    struct hash_speed *speed)
 {
@@ -423,12 +434,7 @@
 		goto out;
 	}
 
-	sg_init_table(sg, TVMEMSIZE);
-	for (i = 0; i < TVMEMSIZE; i++) {
-		sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
-		memset(tvmem[i], 0xff, PAGE_SIZE);
-	}
-
+	test_hash_sg_init(sg);
 	for (i = 0; speed[i].blen != 0; i++) {
 		if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
 			printk(KERN_ERR
@@ -437,6 +443,9 @@
 			goto out;
 		}
 
+		if (speed[i].klen)
+			crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
+
 		printk(KERN_INFO "test%3u "
 		       "(%5u byte blocks,%5u bytes per update,%4u updates): ",
 		       i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
@@ -458,6 +467,250 @@
 	crypto_free_hash(tfm);
 }
 
+struct tcrypt_result {
+	struct completion completion;
+	int err;
+};
+
+static void tcrypt_complete(struct crypto_async_request *req, int err)
+{
+	struct tcrypt_result *res = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	res->err = err;
+	complete(&res->completion);
+}
+
+static inline int do_one_ahash_op(struct ahash_request *req, int ret)
+{
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		struct tcrypt_result *tr = req->base.data;
+
+		ret = wait_for_completion_interruptible(&tr->completion);
+		if (!ret)
+			ret = tr->err;
+		INIT_COMPLETION(tr->completion);
+	}
+	return ret;
+}
+
+static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
+				     char *out, int sec)
+{
+	unsigned long start, end;
+	int bcount;
+	int ret;
+
+	for (start = jiffies, end = start + sec * HZ, bcount = 0;
+	     time_before(jiffies, end); bcount++) {
+		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+		if (ret)
+			return ret;
+	}
+
+	printk("%6u opers/sec, %9lu bytes/sec\n",
+	       bcount / sec, ((long)bcount * blen) / sec);
+
+	return 0;
+}
+
+static int test_ahash_jiffies(struct ahash_request *req, int blen,
+			      int plen, char *out, int sec)
+{
+	unsigned long start, end;
+	int bcount, pcount;
+	int ret;
+
+	if (plen == blen)
+		return test_ahash_jiffies_digest(req, blen, out, sec);
+
+	for (start = jiffies, end = start + sec * HZ, bcount = 0;
+	     time_before(jiffies, end); bcount++) {
+		ret = crypto_ahash_init(req);
+		if (ret)
+			return ret;
+		for (pcount = 0; pcount < blen; pcount += plen) {
+			ret = do_one_ahash_op(req, crypto_ahash_update(req));
+			if (ret)
+				return ret;
+		}
+		/* we assume there is enough space in 'out' for the result */
+		ret = do_one_ahash_op(req, crypto_ahash_final(req));
+		if (ret)
+			return ret;
+	}
+
+	pr_cont("%6u opers/sec, %9lu bytes/sec\n",
+		bcount / sec, ((long)bcount * blen) / sec);
+
+	return 0;
+}
+
+static int test_ahash_cycles_digest(struct ahash_request *req, int blen,
+				    char *out)
+{
+	unsigned long cycles = 0;
+	int ret, i;
+
+	/* Warm-up run. */
+	for (i = 0; i < 4; i++) {
+		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+		if (ret)
+			goto out;
+	}
+
+	/* The real thing. */
+	for (i = 0; i < 8; i++) {
+		cycles_t start, end;
+
+		start = get_cycles();
+
+		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+		if (ret)
+			goto out;
+
+		end = get_cycles();
+
+		cycles += end - start;
+	}
+
+out:
+	if (ret)
+		return ret;
+
+	pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
+		cycles / 8, cycles / (8 * blen));
+
+	return 0;
+}
+
+static int test_ahash_cycles(struct ahash_request *req, int blen,
+			     int plen, char *out)
+{
+	unsigned long cycles = 0;
+	int i, pcount, ret;
+
+	if (plen == blen)
+		return test_ahash_cycles_digest(req, blen, out);
+
+	/* Warm-up run. */
+	for (i = 0; i < 4; i++) {
+		ret = crypto_ahash_init(req);
+		if (ret)
+			goto out;
+		for (pcount = 0; pcount < blen; pcount += plen) {
+			ret = do_one_ahash_op(req, crypto_ahash_update(req));
+			if (ret)
+				goto out;
+		}
+		ret = do_one_ahash_op(req, crypto_ahash_final(req));
+		if (ret)
+			goto out;
+	}
+
+	/* The real thing. */
+	for (i = 0; i < 8; i++) {
+		cycles_t start, end;
+
+		start = get_cycles();
+
+		ret = crypto_ahash_init(req);
+		if (ret)
+			goto out;
+		for (pcount = 0; pcount < blen; pcount += plen) {
+			ret = do_one_ahash_op(req, crypto_ahash_update(req));
+			if (ret)
+				goto out;
+		}
+		ret = do_one_ahash_op(req, crypto_ahash_final(req));
+		if (ret)
+			goto out;
+
+		end = get_cycles();
+
+		cycles += end - start;
+	}
+
+out:
+	if (ret)
+		return ret;
+
+	pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
+		cycles / 8, cycles / (8 * blen));
+
+	return 0;
+}
+
+static void test_ahash_speed(const char *algo, unsigned int sec,
+			     struct hash_speed *speed)
+{
+	struct scatterlist sg[TVMEMSIZE];
+	struct tcrypt_result tresult;
+	struct ahash_request *req;
+	struct crypto_ahash *tfm;
+	static char output[1024];
+	int i, ret;
+
+	printk(KERN_INFO "\ntesting speed of async %s\n", algo);
+
+	tfm = crypto_alloc_ahash(algo, 0, 0);
+	if (IS_ERR(tfm)) {
+		pr_err("failed to load transform for %s: %ld\n",
+		       algo, PTR_ERR(tfm));
+		return;
+	}
+
+	if (crypto_ahash_digestsize(tfm) > sizeof(output)) {
+		pr_err("digestsize(%u) > outputbuffer(%zu)\n",
+		       crypto_ahash_digestsize(tfm), sizeof(output));
+		goto out;
+	}
+
+	test_hash_sg_init(sg);
+	req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		pr_err("ahash request allocation failure\n");
+		goto out;
+	}
+
+	init_completion(&tresult.completion);
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   tcrypt_complete, &tresult);
+
+	for (i = 0; speed[i].blen != 0; i++) {
+		if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
+			pr_err("template (%u) too big for tvmem (%lu)\n",
+			       speed[i].blen, TVMEMSIZE * PAGE_SIZE);
+			break;
+		}
+
+		pr_info("test%3u "
+			"(%5u byte blocks,%5u bytes per update,%4u updates): ",
+			i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
+
+		ahash_request_set_crypt(req, sg, output, speed[i].plen);
+
+		if (sec)
+			ret = test_ahash_jiffies(req, speed[i].blen,
+						 speed[i].plen, output, sec);
+		else
+			ret = test_ahash_cycles(req, speed[i].blen,
+						speed[i].plen, output);
+
+		if (ret) {
+			pr_err("hashing failed ret=%d\n", ret);
+			break;
+		}
+	}
+
+	ahash_request_free(req);
+
+out:
+	crypto_free_ahash(tfm);
+}
+
 static void test_available(void)
 {
 	char **name = check;
@@ -881,9 +1134,87 @@
 		test_hash_speed("rmd320", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
 
+	case 318:
+		test_hash_speed("ghash-generic", sec, hash_speed_template_16);
+		if (mode > 300 && mode < 400) break;
+
 	case 399:
 		break;
 
+	case 400:
+		/* fall through */
+
+	case 401:
+		test_ahash_speed("md4", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 402:
+		test_ahash_speed("md5", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 403:
+		test_ahash_speed("sha1", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 404:
+		test_ahash_speed("sha256", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 405:
+		test_ahash_speed("sha384", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 406:
+		test_ahash_speed("sha512", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 407:
+		test_ahash_speed("wp256", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 408:
+		test_ahash_speed("wp384", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 409:
+		test_ahash_speed("wp512", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 410:
+		test_ahash_speed("tgr128", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 411:
+		test_ahash_speed("tgr160", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 412:
+		test_ahash_speed("tgr192", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 413:
+		test_ahash_speed("sha224", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 414:
+		test_ahash_speed("rmd128", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 415:
+		test_ahash_speed("rmd160", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 416:
+		test_ahash_speed("rmd256", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 417:
+		test_ahash_speed("rmd320", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 499:
+		break;
+
 	case 1000:
 		test_available();
 		break;
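All of the new ahash speed tests drive a possibly-asynchronous transform the same way: register a completion callback, submit, and sleep when the backend answers -EINPROGRESS or -EBUSY. A condensed, self-contained sketch of that pattern (toy_* names are illustrative; the crypto calls are the ones used above):

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>

struct toy_result {
	struct completion completion;
	int err;
};

static void toy_complete(struct crypto_async_request *req, int err)
{
	struct toy_result *res = req->data;

	if (err == -EINPROGRESS)	/* only a notification, final call follows */
		return;
	res->err = err;
	complete(&res->completion);
}

/* Hash 'len' bytes described by 'sg' into 'out', waiting if the driver
 * completes the request asynchronously. */
static int toy_ahash_digest(struct crypto_ahash *tfm, struct scatterlist *sg,
			    unsigned int len, u8 *out)
{
	struct ahash_request *req;
	struct toy_result res;
	int ret;

	init_completion(&res.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   toy_complete, &res);
	ahash_request_set_crypt(req, sg, out, len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	ahash_request_free(req);
	return ret;
}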
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 966bbfa..10cb925 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -25,6 +25,7 @@
 struct hash_speed {
 	unsigned int blen;	/* buffer length */
 	unsigned int plen;	/* per-update length */
+	unsigned int klen;	/* key length */
 };
 
 /*
@@ -83,4 +84,32 @@
 	{  .blen = 0,	.plen = 0, }
 };
 
+static struct hash_speed hash_speed_template_16[] = {
+	{ .blen = 16,	.plen = 16,	.klen = 16, },
+	{ .blen = 64,	.plen = 16,	.klen = 16, },
+	{ .blen = 64,	.plen = 64,	.klen = 16, },
+	{ .blen = 256,	.plen = 16,	.klen = 16, },
+	{ .blen = 256,	.plen = 64,	.klen = 16, },
+	{ .blen = 256,	.plen = 256,	.klen = 16, },
+	{ .blen = 1024,	.plen = 16,	.klen = 16, },
+	{ .blen = 1024,	.plen = 256,	.klen = 16, },
+	{ .blen = 1024,	.plen = 1024,	.klen = 16, },
+	{ .blen = 2048,	.plen = 16,	.klen = 16, },
+	{ .blen = 2048,	.plen = 256,	.klen = 16, },
+	{ .blen = 2048,	.plen = 1024,	.klen = 16, },
+	{ .blen = 2048,	.plen = 2048,	.klen = 16, },
+	{ .blen = 4096,	.plen = 16,	.klen = 16, },
+	{ .blen = 4096,	.plen = 256,	.klen = 16, },
+	{ .blen = 4096,	.plen = 1024,	.klen = 16, },
+	{ .blen = 4096,	.plen = 4096,	.klen = 16, },
+	{ .blen = 8192,	.plen = 16,	.klen = 16, },
+	{ .blen = 8192,	.plen = 256,	.klen = 16, },
+	{ .blen = 8192,	.plen = 1024,	.klen = 16, },
+	{ .blen = 8192,	.plen = 4096,	.klen = 16, },
+	{ .blen = 8192,	.plen = 8192,	.klen = 16, },
+
+	/* End marker */
+	{  .blen = 0,	.plen = 0,	.klen = 0, }
+};
+
 #endif	/* _CRYPTO_TCRYPT_H */
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index c494d76..5c8aaa0 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -153,8 +153,21 @@
 		free_page((unsigned long)buf[i]);
 }
 
+static int do_one_async_hash_op(struct ahash_request *req,
+				struct tcrypt_result *tr,
+				int ret)
+{
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret = wait_for_completion_interruptible(&tr->completion);
+		if (!ret)
+			ret = tr->err;
+		INIT_COMPLETION(tr->completion);
+	}
+	return ret;
+}
+
 static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
-		     unsigned int tcount)
+		     unsigned int tcount, bool use_digest)
 {
 	const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
 	unsigned int i, j, k, temp;
@@ -206,23 +219,36 @@
 		}
 
 		ahash_request_set_crypt(req, sg, result, template[i].psize);
-		ret = crypto_ahash_digest(req);
-		switch (ret) {
-		case 0:
-			break;
-		case -EINPROGRESS:
-		case -EBUSY:
-			ret = wait_for_completion_interruptible(
-				&tresult.completion);
-			if (!ret && !(ret = tresult.err)) {
-				INIT_COMPLETION(tresult.completion);
-				break;
+		if (use_digest) {
+			ret = do_one_async_hash_op(req, &tresult,
+						   crypto_ahash_digest(req));
+			if (ret) {
+				pr_err("alg: hash: digest failed on test %d "
+				       "for %s: ret=%d\n", j, algo, -ret);
+				goto out;
 			}
-			/* fall through */
-		default:
-			printk(KERN_ERR "alg: hash: digest failed on test %d "
-			       "for %s: ret=%d\n", j, algo, -ret);
-			goto out;
+		} else {
+			ret = do_one_async_hash_op(req, &tresult,
+						   crypto_ahash_init(req));
+			if (ret) {
+				pr_err("alt: hash: init failed on test %d "
+				       "for %s: ret=%d\n", j, algo, -ret);
+				goto out;
+			}
+			ret = do_one_async_hash_op(req, &tresult,
+						   crypto_ahash_update(req));
+			if (ret) {
+				pr_err("alt: hash: update failed on test %d "
+				       "for %s: ret=%d\n", j, algo, -ret);
+				goto out;
+			}
+			ret = do_one_async_hash_op(req, &tresult,
+						   crypto_ahash_final(req));
+			if (ret) {
+				pr_err("alt: hash: final failed on test %d "
+				       "for %s: ret=%d\n", j, algo, -ret);
+				goto out;
+			}
 		}
 
 		if (memcmp(result, template[i].digest,
@@ -1402,7 +1428,11 @@
 		return PTR_ERR(tfm);
 	}
 
-	err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count);
+	err = test_hash(tfm, desc->suite.hash.vecs,
+			desc->suite.hash.count, true);
+	if (!err)
+		err = test_hash(tfm, desc->suite.hash.vecs,
+				desc->suite.hash.count, false);
 
 	crypto_free_ahash(tfm);
 	return err;
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index fb76517..74e3537 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1669,17 +1669,73 @@
 	}
 };
 
-#define VMAC_AES_TEST_VECTORS	1
-static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01',
+#define VMAC_AES_TEST_VECTORS	8
+static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
 				'\x02', '\x03', '\x02', '\x02',
 				'\x02', '\x04', '\x01', '\x07',
 				'\x04', '\x01', '\x04', '\x03',};
+static char vmac_string2[128] = {'a', 'b', 'c',};
+static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
+				'a', 'b', 'c', 'a', 'b', 'c',
+				'a', 'b', 'c', 'a', 'b', 'c',
+				'a', 'b', 'c', 'a', 'b', 'c',
+				'a', 'b', 'c', 'a', 'b', 'c',
+				'a', 'b', 'c', 'a', 'b', 'c',
+				'a', 'b', 'c', 'a', 'b', 'c',
+				'a', 'b', 'c', 'a', 'b', 'c',
+				};
+
 static struct hash_testvec aes_vmac128_tv_template[] = {
 	{
+		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.plaintext = NULL,
+		.digest	= "\x07\x58\x80\x35\x77\xa4\x7b\x54",
+		.psize	= 0,
+		.ksize	= 16,
+	}, {
 		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
-		.plaintext = vmac_string,
-		.digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7",
+		.plaintext = vmac_string1,
+		.digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1",
+		.psize  = 128,
+		.ksize  = 16,
+	}, {
+		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.plaintext = vmac_string2,
+		.digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d",
+		.psize  = 128,
+		.ksize  = 16,
+	}, {
+		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.plaintext = vmac_string3,
+		.digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19",
+		.psize  = 128,
+		.ksize  = 16,
+	}, {
+		.key	= "abcdefghijklmnop",
+		.plaintext = NULL,
+		.digest	= "\x3b\x89\xa1\x26\x9e\x55\x8f\x84",
+		.psize	= 0,
+		.ksize	= 16,
+	}, {
+		.key    = "abcdefghijklmnop",
+		.plaintext = vmac_string1,
+		.digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2",
+		.psize  = 128,
+		.ksize  = 16,
+	}, {
+		.key    = "abcdefghijklmnop",
+		.plaintext = vmac_string2,
+		.digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf",
+		.psize  = 128,
+		.ksize  = 16,
+	}, {
+		.key    = "abcdefghijklmnop",
+		.plaintext = vmac_string3,
+		.digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
 		.psize  = 128,
 		.ksize  = 16,
 	},
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 0a9468e..0999274 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -43,6 +43,8 @@
 const u64 m64   = UINT64_C(0xffffffffffffffff);  /* 64-bit mask       */
 const u64 mpoly = UINT64_C(0x1fffffff1fffffff);  /* Poly key mask     */
 
+#define pe64_to_cpup le64_to_cpup		/* Prefer little endian */
+
 #ifdef __LITTLE_ENDIAN
 #define INDEX_HIGH 1
 #define INDEX_LOW 0
@@ -110,8 +112,8 @@
 		int i; u64 th, tl;					\
 		rh = rl = 0;						\
 		for (i = 0; i < nw; i += 2) {				\
-			MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i],	\
-				le64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
+			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
+				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
 			ADD128(rh, rl, th, tl);				\
 		}							\
 	} while (0)
@@ -121,11 +123,11 @@
 		int i; u64 th, tl;					\
 		rh1 = rl1 = rh = rl = 0;				\
 		for (i = 0; i < nw; i += 2) {				\
-			MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i],	\
-				le64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
+			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
+				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
 			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2],	\
-				le64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
+			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
+				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
 			ADD128(rh1, rl1, th, tl);			\
 		}							\
 	} while (0)
@@ -136,17 +138,17 @@
 		int i; u64 th, tl;					\
 		rh = rl = 0;						\
 		for (i = 0; i < nw; i += 8) {				\
-			MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i],	\
-				le64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
+			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
+				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
 			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2],	\
-				le64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
+			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
+				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
 			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4],	\
-				le64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
+			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
+				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
 			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6],	\
-				le64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
+			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
+				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
 			ADD128(rh, rl, th, tl);				\
 		}							\
 	} while (0)
@@ -156,29 +158,29 @@
 		int i; u64 th, tl;					\
 		rh1 = rl1 = rh = rl = 0;				\
 		for (i = 0; i < nw; i += 8) {				\
-			MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i],	\
-				le64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
+			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
+				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
 			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2],	\
-				le64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
+			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
+				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
 			ADD128(rh1, rl1, th, tl);			\
-			MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2],	\
-				le64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
+			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
+				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
 			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4],	\
-				le64_to_cpup((mp)+i+3)+(kp)[i+5]);	\
+			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4],	\
+				pe64_to_cpup((mp)+i+3)+(kp)[i+5]);	\
 			ADD128(rh1, rl1, th, tl);			\
-			MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4],	\
-				le64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
+			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
+				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
 			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6],	\
-				le64_to_cpup((mp)+i+5)+(kp)[i+7]);	\
+			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6],	\
+				pe64_to_cpup((mp)+i+5)+(kp)[i+7]);	\
 			ADD128(rh1, rl1, th, tl);			\
-			MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6],	\
-				le64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
+			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
+				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
 			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8],	\
-				le64_to_cpup((mp)+i+7)+(kp)[i+9]);	\
+			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8],	\
+				pe64_to_cpup((mp)+i+7)+(kp)[i+9]);	\
 			ADD128(rh1, rl1, th, tl);			\
 		}							\
 	} while (0)
@@ -216,8 +218,8 @@
 		int i;							\
 		rh = rl = t = 0;					\
 		for (i = 0; i < nw; i += 2)  {				\
-			t1 = le64_to_cpup(mp+i) + kp[i];		\
-			t2 = le64_to_cpup(mp+i+1) + kp[i+1];		\
+			t1 = pe64_to_cpup(mp+i) + kp[i];		\
+			t2 = pe64_to_cpup(mp+i+1) + kp[i+1];		\
 			m2 = MUL32(t1 >> 32, t2);			\
 			m1 = MUL32(t1, t2 >> 32);			\
 			ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32),	\
@@ -322,8 +324,7 @@
 	ctx->first_block_processed = 0;
 }
 
-static u64 l3hash(u64 p1, u64 p2,
-			u64 k1, u64 k2, u64 len)
+static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
 {
 	u64 rh, rl, t, z = 0;
 
@@ -474,7 +475,7 @@
 	}
 	p = be64_to_cpup(out_p + i);
 	h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
-	return p + h;
+	return le64_to_cpu(p + h);
 }
 
 static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
@@ -549,10 +550,6 @@
 
 static int vmac_init(struct shash_desc *pdesc)
 {
-	struct crypto_shash *parent = pdesc->tfm;
-	struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
-
-	memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
 	return 0;
 }
 
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index e35525b..c79e789 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -71,7 +71,7 @@
 	struct list_head node;
 };
 
-static ssize_t acpi_table_show(struct kobject *kobj,
+static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
 			       struct bin_attribute *bin_attr, char *buf,
 			       loff_t offset, size_t count)
 {
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fd52c48..ef38aff 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -18,9 +18,9 @@
 
 config DEVTMPFS
 	bool "Maintain a devtmpfs filesystem to mount at /dev"
-	depends on HOTPLUG && SHMEM && TMPFS
+	depends on HOTPLUG
 	help
-	  This creates a tmpfs filesystem instance early at bootup.
+	  This creates a tmpfs/ramfs filesystem instance early at bootup.
 	  In this filesystem, the kernel driver core maintains device
 	  nodes with their default names and permissions for all
 	  registered devices with an assigned major/minor number.
@@ -33,6 +33,9 @@
 	  functional /dev without any further help. It also allows simple
 	  rescue systems, and reliably handles dynamic major/minor numbers.
 
+	  Notice: if CONFIG_TMPFS isn't enabled, the simpler ramfs
+	  file system will be used instead.
+
 config DEVTMPFS_MOUNT
 	bool "Automount devtmpfs at /dev, after the kernel mounted the rootfs"
 	depends on DEVTMPFS
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 9c6a0d6..8e231d0 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -63,6 +63,14 @@
 	kfree(cp);
 }
 
+static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj)
+{
+	struct class_private *cp = to_class(kobj);
+	struct class *class = cp->class;
+
+	return class->ns_type;
+}
+
 static const struct sysfs_ops class_sysfs_ops = {
 	.show	= class_attr_show,
 	.store	= class_attr_store,
@@ -71,6 +79,7 @@
 static struct kobj_type class_ktype = {
 	.sysfs_ops	= &class_sysfs_ops,
 	.release	= class_release,
+	.child_ns_type	= class_child_ns_type,
 };
 
 /* Hotplug events for classes go to the class class_subsys */
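class_child_ns_type() simply reports whatever ns_type the class declared, so only classes that opt in are affected. A hypothetical class that tags its devices by network namespace might look like the following sketch; widget_class, widget_namespace and the reliance on net_ns_type_operations are assumptions for illustration, not part of this patch:

#include <linux/device.h>

/* Tag each device of this class with the namespace it belongs to, so
 * sysfs can show it only inside that namespace. */
static const void *widget_namespace(struct device *dev)
{
	/* assumption: the driver stashed its struct net in drvdata */
	return dev_get_drvdata(dev);
}

static struct class widget_class = {
	.name      = "widget",
	.owner     = THIS_MODULE,
	.ns_type   = &net_ns_type_operations,	/* assumed net-ns type ops */
	.namespace = widget_namespace,
};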
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b56a0ba..9630fbd 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -20,7 +20,6 @@
 #include <linux/notifier.h>
 #include <linux/genhd.h>
 #include <linux/kallsyms.h>
-#include <linux/semaphore.h>
 #include <linux/mutex.h>
 #include <linux/async.h>
 
@@ -132,9 +131,21 @@
 	kfree(p);
 }
 
+static const void *device_namespace(struct kobject *kobj)
+{
+	struct device *dev = to_dev(kobj);
+	const void *ns = NULL;
+
+	if (dev->class && dev->class->ns_type)
+		ns = dev->class->namespace(dev);
+
+	return ns;
+}
+
 static struct kobj_type device_ktype = {
 	.release	= device_release,
 	.sysfs_ops	= &dev_sysfs_ops,
+	.namespace	= device_namespace,
 };
 
 
@@ -559,10 +570,10 @@
 	dev->kobj.kset = devices_kset;
 	kobject_init(&dev->kobj, &device_ktype);
 	INIT_LIST_HEAD(&dev->dma_pools);
-	init_MUTEX(&dev->sem);
+	mutex_init(&dev->mutex);
+	lockdep_set_novalidate_class(&dev->mutex);
 	spin_lock_init(&dev->devres_lock);
 	INIT_LIST_HEAD(&dev->devres_head);
-	device_init_wakeup(dev, 0);
 	device_pm_init(dev);
 	set_dev_node(dev, -1);
 }
@@ -596,11 +607,59 @@
 	return virtual_dir;
 }
 
+struct class_dir {
+	struct kobject kobj;
+	struct class *class;
+};
+
+#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
+
+static void class_dir_release(struct kobject *kobj)
+{
+	struct class_dir *dir = to_class_dir(kobj);
+	kfree(dir);
+}
+
+static const
+struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
+{
+	struct class_dir *dir = to_class_dir(kobj);
+	return dir->class->ns_type;
+}
+
+static struct kobj_type class_dir_ktype = {
+	.release	= class_dir_release,
+	.sysfs_ops	= &kobj_sysfs_ops,
+	.child_ns_type	= class_dir_child_ns_type
+};
+
+static struct kobject *
+class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+{
+	struct class_dir *dir;
+	int retval;
+
+	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+	if (!dir)
+		return NULL;
+
+	dir->class = class;
+	kobject_init(&dir->kobj, &class_dir_ktype);
+
+	dir->kobj.kset = &class->p->class_dirs;
+
+	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
+	if (retval < 0) {
+		kobject_put(&dir->kobj);
+		return NULL;
+	}
+	return &dir->kobj;
+}
+
+
 static struct kobject *get_device_parent(struct device *dev,
 					 struct device *parent)
 {
-	int retval;
-
 	if (dev->class) {
 		static DEFINE_MUTEX(gdp_mutex);
 		struct kobject *kobj = NULL;
@@ -635,18 +694,7 @@
 		}
 
 		/* or create a new class-directory at the parent device */
-		k = kobject_create();
-		if (!k) {
-			mutex_unlock(&gdp_mutex);
-			return NULL;
-		}
-		k->kset = &dev->class->p->class_dirs;
-		retval = kobject_add(k, parent_kobj, "%s", dev->class->name);
-		if (retval < 0) {
-			mutex_unlock(&gdp_mutex);
-			kobject_put(k);
-			return NULL;
-		}
+		k = class_dir_create_and_add(dev->class, parent_kobj);
 		/* do not emit an uevent for this simple "glue" directory */
 		mutex_unlock(&gdp_mutex);
 		return k;
@@ -738,7 +786,7 @@
 out_busid:
 	if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
 	    device_is_not_partition(dev))
-		sysfs_remove_link(&dev->class->p->class_subsys.kobj,
+		sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj,
 				  dev_name(dev));
 #else
 	/* link in the class directory pointing to the device */
@@ -756,7 +804,7 @@
 	return 0;
 
 out_busid:
-	sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev_name(dev));
+	sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev));
 #endif
 
 out_subsys:
@@ -784,13 +832,13 @@
 
 	if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
 	    device_is_not_partition(dev))
-		sysfs_remove_link(&dev->class->p->class_subsys.kobj,
+		sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj,
 				  dev_name(dev));
 #else
 	if (dev->parent && device_is_not_partition(dev))
 		sysfs_remove_link(&dev->kobj, "device");
 
-	sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev_name(dev));
+	sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev));
 #endif
 
 	sysfs_remove_link(&dev->kobj, "subsystem");
@@ -1372,7 +1420,7 @@
 		return ERR_PTR(err);
 	}
 
-#ifdef CONFIG_MODULE	/* gotta find a "cleaner" way to do this */
+#ifdef CONFIG_MODULES	/* gotta find a "cleaner" way to do this */
 	if (owner) {
 		struct module_kobject *mk = &owner->mkobj;
 
@@ -1576,6 +1624,14 @@
 		goto out;
 	}
 
+#ifndef CONFIG_SYSFS_DEPRECATED
+	if (dev->class) {
+		error = sysfs_rename_link(&dev->class->p->class_subsys.kobj,
+			&dev->kobj, old_device_name, new_name);
+		if (error)
+			goto out;
+	}
+#endif
 	error = kobject_rename(&dev->kobj, new_name);
 	if (error)
 		goto out;
@@ -1590,11 +1646,6 @@
 						  new_class_name);
 		}
 	}
-#else
-	if (dev->class) {
-		error = sysfs_rename_link(&dev->class->p->class_subsys.kobj,
-					  &dev->kobj, old_device_name, new_name);
-	}
 #endif
 
 out:
@@ -1735,10 +1786,25 @@
  */
 void device_shutdown(void)
 {
-	struct device *dev, *devn;
+	struct device *dev;
 
-	list_for_each_entry_safe_reverse(dev, devn, &devices_kset->list,
-				kobj.entry) {
+	spin_lock(&devices_kset->list_lock);
+	/*
+	 * Walk the devices list backward, shutting down each in turn.
+	 * Beware that device unplug events may also start pulling
+	 * devices offline, even as the system is shutting down.
+	 */
+	while (!list_empty(&devices_kset->list)) {
+		dev = list_entry(devices_kset->list.prev, struct device,
+				kobj.entry);
+		get_device(dev);
+		/*
+		 * Make sure the device is off the kset list, in the
+		 * event that dev->*->shutdown() doesn't remove it.
+		 */
+		list_del_init(&dev->kobj.entry);
+		spin_unlock(&devices_kset->list_lock);
+
 		if (dev->bus && dev->bus->shutdown) {
 			dev_dbg(dev, "shutdown\n");
 			dev->bus->shutdown(dev);
@@ -1746,6 +1812,10 @@
 			dev_dbg(dev, "shutdown\n");
 			dev->driver->shutdown(dev);
 		}
+		put_device(dev);
+
+		spin_lock(&devices_kset->list_lock);
 	}
+	spin_unlock(&devices_kset->list_lock);
 	async_synchronize_full();
 }
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index f35719a..251acea 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -186,7 +186,7 @@
 	/* display offline cpus < nr_cpu_ids */
 	if (!alloc_cpumask_var(&offline, GFP_KERNEL))
 		return -ENOMEM;
-	cpumask_complement(offline, cpu_online_mask);
+	cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
 	n = cpulist_scnprintf(buf, len, offline);
 	free_cpumask_var(offline);
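The switch from cpumask_complement() to cpumask_andnot() matters when the cpumask width (NR_CPUS) is larger than nr_cpu_ids: complementing the online mask also sets bits for CPU ids that cannot exist on this system. Illustrative numbers:

/* NR_CPUS = 8, possible CPUs = {0,1,2,3}, online = {0,1}
 *
 *   cpumask_complement(offline, online)          -> {2,3,4,5,6,7}
 *   cpumask_andnot(offline, possible, online)    -> {2,3}
 *
 * Only the and-not form lists exactly the real-but-offline CPUs.
 */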
 
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index c89291f..503c262 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -40,11 +40,11 @@
 	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev_name(dev),
 		 __func__, dev->driver->name);
 
+	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+
 	if (dev->bus)
 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
 					     BUS_NOTIFY_BOUND_DRIVER, dev);
-
-	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
 }
 
 static int driver_sysfs_add(struct device *dev)
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 057cf11..af06001 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -20,6 +20,7 @@
 #include <linux/namei.h>
 #include <linux/fs.h>
 #include <linux/shmem_fs.h>
+#include <linux/ramfs.h>
 #include <linux/cred.h>
 #include <linux/sched.h>
 #include <linux/init_task.h>
@@ -45,7 +46,11 @@
 static int dev_get_sb(struct file_system_type *fs_type, int flags,
 		      const char *dev_name, void *data, struct vfsmount *mnt)
 {
+#ifdef CONFIG_TMPFS
 	return get_sb_single(fs_type, flags, data, shmem_fill_super, mnt);
+#else
+	return get_sb_single(fs_type, flags, data, ramfs_fill_super, mnt);
+#endif
 }
 
 static struct file_system_type dev_fs_type = {
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 985da11..3f093b0 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -27,6 +27,52 @@
 MODULE_DESCRIPTION("Multi purpose firmware loading support");
 MODULE_LICENSE("GPL");
 
+/* Builtin firmware support */
+
+#ifdef CONFIG_FW_LOADER
+
+extern struct builtin_fw __start_builtin_fw[];
+extern struct builtin_fw __end_builtin_fw[];
+
+static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+{
+	struct builtin_fw *b_fw;
+
+	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
+		if (strcmp(name, b_fw->name) == 0) {
+			fw->size = b_fw->size;
+			fw->data = b_fw->data;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static bool fw_is_builtin_firmware(const struct firmware *fw)
+{
+	struct builtin_fw *b_fw;
+
+	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
+		if (fw->data == b_fw->data)
+			return true;
+
+	return false;
+}
+
+#else /* Module case - no builtin firmware support */
+
+static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+{
+	return false;
+}
+
+static inline bool fw_is_builtin_firmware(const struct firmware *fw)
+{
+	return false;
+}
+#endif
+
 enum {
 	FW_STATUS_LOADING,
 	FW_STATUS_DONE,
@@ -40,7 +86,6 @@
 static DEFINE_MUTEX(fw_lock);
 
 struct firmware_priv {
-	char *fw_id;
 	struct completion completion;
 	struct bin_attribute attr_data;
 	struct firmware *fw;
@@ -48,18 +93,11 @@
 	struct page **pages;
 	int nr_pages;
 	int page_array_size;
-	const char *vdata;
 	struct timer_list timeout;
+	bool nowait;
+	char fw_id[];
 };
 
-#ifdef CONFIG_FW_LOADER
-extern struct builtin_fw __start_builtin_fw[];
-extern struct builtin_fw __end_builtin_fw[];
-#else /* Module case. Avoid ifdefs later; it'll all optimise out */
-static struct builtin_fw *__start_builtin_fw;
-static struct builtin_fw *__end_builtin_fw;
-#endif
-
 static void
 fw_load_abort(struct firmware_priv *fw_priv)
 {
@@ -100,9 +138,25 @@
 	return count;
 }
 
-static CLASS_ATTR(timeout, 0644, firmware_timeout_show, firmware_timeout_store);
+static struct class_attribute firmware_class_attrs[] = {
+	__ATTR(timeout, S_IWUSR | S_IRUGO,
+		firmware_timeout_show, firmware_timeout_store),
+	__ATTR_NULL
+};
 
-static void fw_dev_release(struct device *dev);
+static void fw_dev_release(struct device *dev)
+{
+	struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < fw_priv->nr_pages; i++)
+		__free_page(fw_priv->pages[i]);
+	kfree(fw_priv->pages);
+	kfree(fw_priv);
+	kfree(dev);
+
+	module_put(THIS_MODULE);
+}
 
 static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
@@ -112,12 +166,15 @@
 		return -ENOMEM;
 	if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
 		return -ENOMEM;
+	if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
+		return -ENOMEM;
 
 	return 0;
 }
 
 static struct class firmware_class = {
 	.name		= "firmware",
+	.class_attrs	= firmware_class_attrs,
 	.dev_uevent	= firmware_uevent,
 	.dev_release	= fw_dev_release,
 };
@@ -130,6 +187,17 @@
 	return sprintf(buf, "%d\n", loading);
 }
 
+static void firmware_free_data(const struct firmware *fw)
+{
+	int i;
+	vunmap(fw->data);
+	if (fw->pages) {
+		for (i = 0; i < PFN_UP(fw->size); i++)
+			__free_page(fw->pages[i]);
+		kfree(fw->pages);
+	}
+}
+
 /* Some architectures don't have PAGE_KERNEL_RO */
 #ifndef PAGE_KERNEL_RO
 #define PAGE_KERNEL_RO PAGE_KERNEL
@@ -162,21 +230,21 @@
 			mutex_unlock(&fw_lock);
 			break;
 		}
-		vfree(fw_priv->fw->data);
-		fw_priv->fw->data = NULL;
+		firmware_free_data(fw_priv->fw);
+		memset(fw_priv->fw, 0, sizeof(struct firmware));
+		/* If the pages are not owned by 'struct firmware' */
 		for (i = 0; i < fw_priv->nr_pages; i++)
 			__free_page(fw_priv->pages[i]);
 		kfree(fw_priv->pages);
 		fw_priv->pages = NULL;
 		fw_priv->page_array_size = 0;
 		fw_priv->nr_pages = 0;
-		fw_priv->fw->size = 0;
 		set_bit(FW_STATUS_LOADING, &fw_priv->status);
 		mutex_unlock(&fw_lock);
 		break;
 	case 0:
 		if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
-			vfree(fw_priv->fw->data);
+			vunmap(fw_priv->fw->data);
 			fw_priv->fw->data = vmap(fw_priv->pages,
 						 fw_priv->nr_pages,
 						 0, PAGE_KERNEL_RO);
@@ -184,7 +252,10 @@
 				dev_err(dev, "%s: vmap() failed\n", __func__);
 				goto err;
 			}
-			/* Pages will be freed by vfree() */
+			/* Pages are now owned by 'struct firmware' */
+			fw_priv->fw->pages = fw_priv->pages;
+			fw_priv->pages = NULL;
+
 			fw_priv->page_array_size = 0;
 			fw_priv->nr_pages = 0;
 			complete(&fw_priv->completion);
@@ -207,8 +278,9 @@
 static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
 
 static ssize_t
-firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
-		   char *buffer, loff_t offset, size_t count)
+firmware_data_read(struct file *filp, struct kobject *kobj,
+		   struct bin_attribute *bin_attr, char *buffer, loff_t offset,
+		   size_t count)
 {
 	struct device *dev = to_dev(kobj);
 	struct firmware_priv *fw_priv = dev_get_drvdata(dev);
@@ -291,6 +363,7 @@
 
 /**
  * firmware_data_write - write method for firmware
+ * @filp: open sysfs file
  * @kobj: kobject for the device
  * @bin_attr: bin_attr structure
  * @buffer: buffer being written
@@ -301,8 +374,9 @@
  *	the driver as a firmware image.
  **/
 static ssize_t
-firmware_data_write(struct kobject *kobj, struct bin_attribute *bin_attr,
-		    char *buffer, loff_t offset, size_t count)
+firmware_data_write(struct file *filp, struct kobject *kobj,
+		    struct bin_attribute *bin_attr, char *buffer,
+		    loff_t offset, size_t count)
 {
 	struct device *dev = to_dev(kobj);
 	struct firmware_priv *fw_priv = dev_get_drvdata(dev);
@@ -353,21 +427,6 @@
 	.write = firmware_data_write,
 };
 
-static void fw_dev_release(struct device *dev)
-{
-	struct firmware_priv *fw_priv = dev_get_drvdata(dev);
-	int i;
-
-	for (i = 0; i < fw_priv->nr_pages; i++)
-		__free_page(fw_priv->pages[i]);
-	kfree(fw_priv->pages);
-	kfree(fw_priv->fw_id);
-	kfree(fw_priv);
-	kfree(dev);
-
-	module_put(THIS_MODULE);
-}
-
 static void
 firmware_class_timeout(u_long data)
 {
@@ -379,8 +438,8 @@
 			      struct device *device)
 {
 	int retval;
-	struct firmware_priv *fw_priv = kzalloc(sizeof(*fw_priv),
-						GFP_KERNEL);
+	struct firmware_priv *fw_priv =
+		kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1, GFP_KERNEL);
 	struct device *f_dev = kzalloc(sizeof(*f_dev), GFP_KERNEL);
 
 	*dev_p = NULL;
@@ -391,16 +450,9 @@
 		goto error_kfree;
 	}
 
+	strcpy(fw_priv->fw_id, fw_name);
 	init_completion(&fw_priv->completion);
 	fw_priv->attr_data = firmware_attr_data_tmpl;
-	fw_priv->fw_id = kstrdup(fw_name, GFP_KERNEL);
-	if (!fw_priv->fw_id) {
-		dev_err(device, "%s: Firmware name allocation failed\n",
-			__func__);
-		retval = -ENOMEM;
-		goto error_kfree;
-	}
-
 	fw_priv->timeout.function = firmware_class_timeout;
 	fw_priv->timeout.data = (u_long) fw_priv;
 	init_timer(&fw_priv->timeout);
@@ -427,7 +479,7 @@
 
 static int fw_setup_device(struct firmware *fw, struct device **dev_p,
 			   const char *fw_name, struct device *device,
-			   int uevent)
+			   int uevent, bool nowait)
 {
 	struct device *f_dev;
 	struct firmware_priv *fw_priv;
@@ -443,6 +495,8 @@
 
 	fw_priv = dev_get_drvdata(f_dev);
 
+	fw_priv->nowait = nowait;
+
 	fw_priv->fw = fw;
 	sysfs_bin_attr_init(&fw_priv->attr_data);
 	retval = sysfs_create_bin_file(&f_dev->kobj, &fw_priv->attr_data);
@@ -470,12 +524,11 @@
 
 static int
 _request_firmware(const struct firmware **firmware_p, const char *name,
-		 struct device *device, int uevent)
+		 struct device *device, int uevent, bool nowait)
 {
 	struct device *f_dev;
 	struct firmware_priv *fw_priv;
 	struct firmware *firmware;
-	struct builtin_fw *builtin;
 	int retval;
 
 	if (!firmware_p)
@@ -489,21 +542,16 @@
 		goto out;
 	}
 
-	for (builtin = __start_builtin_fw; builtin != __end_builtin_fw;
-	     builtin++) {
-		if (strcmp(name, builtin->name))
-			continue;
-		dev_info(device, "firmware: using built-in firmware %s\n",
-			 name);
-		firmware->size = builtin->size;
-		firmware->data = builtin->data;
+	if (fw_get_builtin_firmware(firmware, name)) {
+		dev_dbg(device, "firmware: using built-in firmware %s\n", name);
 		return 0;
 	}
 
 	if (uevent)
-		dev_info(device, "firmware: requesting %s\n", name);
+		dev_dbg(device, "firmware: requesting %s\n", name);
 
-	retval = fw_setup_device(firmware, &f_dev, name, device, uevent);
+	retval = fw_setup_device(firmware, &f_dev, name, device,
+				 uevent, nowait);
 	if (retval)
 		goto error_kfree_fw;
 
@@ -560,26 +608,18 @@
                  struct device *device)
 {
         int uevent = 1;
-        return _request_firmware(firmware_p, name, device, uevent);
+        return _request_firmware(firmware_p, name, device, uevent, false);
 }
 
 /**
  * release_firmware: - release the resource associated with a firmware image
  * @fw: firmware resource to release
  **/
-void
-release_firmware(const struct firmware *fw)
+void release_firmware(const struct firmware *fw)
 {
-	struct builtin_fw *builtin;
-
 	if (fw) {
-		for (builtin = __start_builtin_fw; builtin != __end_builtin_fw;
-		     builtin++) {
-			if (fw->data == builtin->data)
-				goto free_fw;
-		}
-		vfree(fw->data);
-	free_fw:
+		if (!fw_is_builtin_firmware(fw))
+			firmware_free_data(fw);
 		kfree(fw);
 	}
 }
@@ -606,7 +646,7 @@
 		return 0;
 	}
 	ret = _request_firmware(&fw, fw_work->name, fw_work->device,
-		fw_work->uevent);
+		fw_work->uevent, true);
 
 	fw_work->cont(fw, fw_work->context);
 
@@ -670,26 +710,12 @@
 	return 0;
 }
 
-static int __init
-firmware_class_init(void)
+static int __init firmware_class_init(void)
 {
-	int error;
-	error = class_register(&firmware_class);
-	if (error) {
-		printk(KERN_ERR "%s: class_register failed\n", __func__);
-		return error;
-	}
-	error = class_create_file(&firmware_class, &class_attr_timeout);
-	if (error) {
-		printk(KERN_ERR "%s: class_create_file failed\n",
-		       __func__);
-		class_unregister(&firmware_class);
-	}
-	return error;
-
+	return class_register(&firmware_class);
 }
-static void __exit
-firmware_class_exit(void)
+
+static void __exit firmware_class_exit(void)
 {
 	class_unregister(&firmware_class);
 }
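None of this rework changes the consumer side; a driver still requests and releases an image roughly as follows (toy_* names and the firmware path are illustrative):

#include <linux/firmware.h>

static int toy_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int err;

	/* May be satisfied from built-in firmware, or via the sysfs
	 * loading interface implemented in firmware_class.c above. */
	err = request_firmware(&fw, "toy/toy.bin", dev);
	if (err)
		return err;

	/* fw->data / fw->size are read-only for the driver */
	/* ... push fw->data to the hardware ... */

	release_firmware(fw);
	return 0;
}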
diff --git a/drivers/base/module.c b/drivers/base/module.c
index f32f2f9..db930d3 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -15,12 +15,10 @@
 {
 	char *driver_name;
 
-	driver_name = kmalloc(strlen(drv->name) + strlen(drv->bus->name) + 2,
-			      GFP_KERNEL);
+	driver_name = kasprintf(GFP_KERNEL, "%s:%s", drv->bus->name, drv->name);
 	if (!driver_name)
 		return NULL;
 
-	sprintf(driver_name, "%s:%s", drv->bus->name, drv->name);
 	return driver_name;
 }
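kasprintf() performs the allocation and the formatting in one call, replacing the removed kmalloc()+sprintf() pair and its hand-computed buffer length. For illustration:

#include <linux/kernel.h>

/* illustrative only: same result as the removed two-step version */
static char *make_driver_name(const char *bus, const char *drv)
{
	/* returns NULL if the allocation fails; free with kfree() */
	return kasprintf(GFP_KERNEL, "%s:%s", bus, drv);
}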
 
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ada6397..4d99c8b 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -735,7 +735,7 @@
 
 #ifdef CONFIG_SUSPEND
 
-static int platform_pm_suspend(struct device *dev)
+int __weak platform_pm_suspend(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -753,7 +753,7 @@
 	return ret;
 }
 
-static int platform_pm_suspend_noirq(struct device *dev)
+int __weak platform_pm_suspend_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -769,7 +769,7 @@
 	return ret;
 }
 
-static int platform_pm_resume(struct device *dev)
+int __weak platform_pm_resume(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -787,7 +787,7 @@
 	return ret;
 }
 
-static int platform_pm_resume_noirq(struct device *dev)
+int __weak platform_pm_resume_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
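Dropping "static" and adding __weak turns these generic callbacks into default implementations: an architecture or platform can provide a strong definition with the same signature and the linker will pick it instead. A hedged sketch of such an override (my_soc_pm_suspend is an invented helper):

/* somewhere in platform/SoC code, not in drivers/base/platform.c */
int platform_pm_suspend(struct device *dev)
{
	/* do SoC-specific work first, then whatever driver handling the
	 * platform needs (illustrative only) */
	return my_soc_pm_suspend(dev);
}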
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 77bfce5..de27768 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -76,6 +76,17 @@
 
 	  It's pretty unlikely that you have one of these: say N.
 
+config GDROM
+	tristate "SEGA Dreamcast GD-ROM drive"
+	depends on SH_DREAMCAST
+	help
+	  A standard SEGA Dreamcast comes with a modified CD ROM drive called a
+	  "GD-ROM" by SEGA to signify it is capable of reading special disks
+	  with up to 1 GB of data. This drive will also read standard CD ROM
+	  disks. Select this option to access any disks in your GD ROM drive.
+	  Most users will want to say "Y" here.
+	  You can also build this as a module which will be called gdrom.
+
 config PARIDE
 	tristate "Parallel port IDE device support"
 	depends on PARPORT_PC
@@ -103,17 +114,6 @@
 	  "MicroSolutions backpack protocol", "DataStor Commuter protocol"
 	  etc.).
 
-config GDROM
-	tristate "SEGA Dreamcast GD-ROM drive"
-	depends on SH_DREAMCAST
-	help
-	  A standard SEGA Dreamcast comes with a modified CD ROM drive called a
-	  "GD-ROM" by SEGA to signify it is capable of reading special disks
-	  with up to 1 GB of data. This drive will also read standard CD ROM
-	  disks. Select this option to access any disks in your GD ROM drive.
-	  Most users will want to say "Y" here.
-	  You can also build this as a module which will be called gdrom.
-
 source "drivers/block/paride/Kconfig"
 
 config BLK_CPQ_DA
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 3390716..e3f88d6 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -84,6 +84,9 @@
 #define BM_MD_IO_ERROR  1
 #define BM_P_VMALLOCED  2
 
+static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+			       unsigned long e, int val, const enum km_type km);
+
 static int bm_is_locked(struct drbd_bitmap *b)
 {
 	return test_bit(BM_LOCKED, &b->bm_flags);
@@ -441,7 +444,7 @@
  * In case this is actually a resize, we copy the old bitmap into the new one.
  * Otherwise, the bitmap is initialized to all bits set.
  */
-int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity)
+int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
 {
 	struct drbd_bitmap *b = mdev->bitmap;
 	unsigned long bits, words, owords, obits, *p_addr, *bm;
@@ -516,7 +519,7 @@
 	obits  = b->bm_bits;
 
 	growing = bits > obits;
-	if (opages)
+	if (opages && growing && set_new_bits)
 		bm_set_surplus(b);
 
 	b->bm_pages = npages;
@@ -526,8 +529,12 @@
 	b->bm_dev_capacity = capacity;
 
 	if (growing) {
-		bm_memset(b, owords, 0xff, words-owords);
-		b->bm_set += bits - obits;
+		if (set_new_bits) {
+			bm_memset(b, owords, 0xff, words-owords);
+			b->bm_set += bits - obits;
+		} else
+			bm_memset(b, owords, 0x00, words-owords);
+
 	}
 
 	if (want < have) {
@@ -773,7 +780,7 @@
 	/* nothing to do, on disk == in memory */
 # define bm_cpu_to_lel(x) ((void)0)
 # else
-void bm_cpu_to_lel(struct drbd_bitmap *b)
+static void bm_cpu_to_lel(struct drbd_bitmap *b)
 {
 	/* need to cpu_to_lel all the pages ...
 	 * this may be optimized by using
@@ -1015,7 +1022,7 @@
  * wants bitnr, not sector.
  * expected to be called for only a few bits (e - s about BITS_PER_LONG).
  * Must hold bitmap lock already. */
-int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 	unsigned long e, int val, const enum km_type km)
 {
 	struct drbd_bitmap *b = mdev->bitmap;
@@ -1053,7 +1060,7 @@
  * for val != 0, we change 0 -> 1, return code positive
  * for val == 0, we change 1 -> 0, return code negative
  * wants bitnr, not sector */
-int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 	const unsigned long e, int val)
 {
 	unsigned long flags;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e5e86a7..e9654c8 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -132,6 +132,7 @@
 	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
 	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
 	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
+	DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */
 
 	DRBD_FAULT_MAX,
 };
@@ -208,8 +209,11 @@
 	P_RS_IS_IN_SYNC	      = 0x22, /* meta socket */
 	P_SYNC_PARAM89	      = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
 	P_COMPRESSED_BITMAP   = 0x24, /* compressed or otherwise encoded bitmap transfer */
+	/* P_CKPT_FENCE_REQ      = 0x25, * currently reserved for protocol D */
+	/* P_CKPT_DISABLE_REQ    = 0x26, * currently reserved for protocol D */
+	P_DELAY_PROBE         = 0x27, /* is used on BOTH sockets */
 
-	P_MAX_CMD	      = 0x25,
+	P_MAX_CMD	      = 0x28,
 	P_MAY_IGNORE	      = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
 	P_MAX_OPT_CMD	      = 0x101,
 
@@ -264,6 +268,7 @@
 		[P_CSUM_RS_REQUEST]     = "CsumRSRequest",
 		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
 		[P_COMPRESSED_BITMAP]   = "CBitmap",
+		[P_DELAY_PROBE]         = "DelayProbe",
 		[P_MAX_CMD]	        = NULL,
 	};
 
@@ -481,7 +486,8 @@
 	u64	    u_size;  /* user requested size */
 	u64	    c_size;  /* current exported size */
 	u32	    max_segment_size;  /* Maximal size of a BIO */
-	u32	    queue_order_type;
+	u16	    queue_order_type;  /* not yet implemented in DRBD */
+	u16	    dds_flags; /* use enum dds_flags here. */
 } __packed;
 
 struct p_state {
@@ -538,6 +544,18 @@
 	u8 code[0];
 } __packed;
 
+struct p_delay_probe {
+	struct p_header head;
+	u32	seq_num; /* sequence number to match the two probe packets */
+	u32	offset;	 /* usecs the probe got sent after the reference time point */
+} __packed;
+
+struct delay_probe {
+	struct list_head list;
+	unsigned int seq_num;
+	struct timeval time;
+};
+
 /* DCBP: Drbd Compressed Bitmap Packet ... */
 static inline enum drbd_bitmap_code
 DCBP_get_code(struct p_compressed_bm *p)
@@ -722,22 +740,6 @@
 	EV_CLEANUP = 32, /* used as flag */
 };
 
-struct drbd_epoch_entry {
-	struct drbd_work    w;
-	struct drbd_conf *mdev;
-	struct bio *private_bio;
-	struct hlist_node colision;
-	sector_t sector;
-	unsigned int size;
-	struct drbd_epoch *epoch;
-
-	/* up to here, the struct layout is identical to drbd_request;
-	 * we might be able to use that to our advantage...  */
-
-	unsigned int flags;
-	u64    block_id;
-};
-
 struct drbd_wq_barrier {
 	struct drbd_work w;
 	struct completion done;
@@ -748,17 +750,49 @@
 	void *digest;
 };
 
-/* ee flag bits */
+struct drbd_epoch_entry {
+	struct drbd_work w;
+	struct hlist_node colision;
+	struct drbd_epoch *epoch;
+	struct drbd_conf *mdev;
+	struct page *pages;
+	atomic_t pending_bios;
+	unsigned int size;
+	/* see comments on ee flag bits below */
+	unsigned long flags;
+	sector_t sector;
+	u64 block_id;
+};
+
+/* ee flag bits.
+ * While corresponding bios are in flight, the only modification will be
+ * set_bit WAS_ERROR, which has to be atomic.
+ * If no bios are in flight yet, or all have been completed,
+ * non-atomic modification to ee->flags is ok.
+ */
 enum {
 	__EE_CALL_AL_COMPLETE_IO,
-	__EE_CONFLICT_PENDING,
 	__EE_MAY_SET_IN_SYNC,
+
+	/* This epoch entry closes an epoch using a barrier.
+	 * On successful completion, the epoch is released,
+	 * and the P_BARRIER_ACK sent. */
 	__EE_IS_BARRIER,
+
+	/* In case a barrier failed,
+	 * we need to resubmit without the barrier flag. */
+	__EE_RESUBMITTED,
+
+	/* we may have several bios per epoch entry.
+	 * if any of those fail, we set this flag atomically
+	 * from the endio callback */
+	__EE_WAS_ERROR,
 };
 #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
-#define EE_CONFLICT_PENDING    (1<<__EE_CONFLICT_PENDING)
 #define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
 #define EE_IS_BARRIER          (1<<__EE_IS_BARRIER)
+#define	EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
+#define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
 
 /* global flag bits */
 enum {
@@ -908,9 +942,12 @@
 	unsigned int ko_count;
 	struct drbd_work  resync_work,
 			  unplug_work,
-			  md_sync_work;
+			  md_sync_work,
+			  delay_probe_work,
+			  uuid_work;
 	struct timer_list resync_timer;
 	struct timer_list md_sync_timer;
+	struct timer_list delay_probe_timer;
 
 	/* Used after attach while negotiating new disk state. */
 	union drbd_state new_state_tmp;
@@ -1026,6 +1063,13 @@
 	u64 ed_uuid; /* UUID of the exposed data */
 	struct mutex state_mutex;
 	char congestion_reason;  /* Why we where congested... */
+	struct list_head delay_probes; /* protected by peer_seq_lock */
+	int data_delay;   /* Delay of packets on the data-sock behind meta-sock */
+	unsigned int delay_seq; /* To generate sequence numbers of delay probes */
+	struct timeval dps_time; /* delay-probes-start-time */
+	unsigned int dp_volume_last;  /* send_cnt of last delay probe */
+	int c_sync_rate; /* current resync rate after delay_probe magic */
+	atomic_t new_c_uuid;
 };
 
 static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1081,6 +1125,11 @@
 	CS_ORDERED      = CS_WAIT_COMPLETE + CS_SERIALIZE,
 };
 
+enum dds_flags {
+	DDSF_FORCED    = 1,
+	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
+};
+
 extern void drbd_init_set_defaults(struct drbd_conf *mdev);
 extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
 			union drbd_state mask, union drbd_state val);
@@ -1113,7 +1162,7 @@
 extern int drbd_send_uuids(struct drbd_conf *mdev);
 extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
 extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val);
-extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply);
+extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
 extern int _drbd_send_state(struct drbd_conf *mdev);
 extern int drbd_send_state(struct drbd_conf *mdev);
 extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
@@ -1311,7 +1360,7 @@
 #define APP_R_HSIZE 15
 
 extern int  drbd_bm_init(struct drbd_conf *mdev);
-extern int  drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors);
+extern int  drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
 extern void drbd_bm_cleanup(struct drbd_conf *mdev);
 extern void drbd_bm_set_all(struct drbd_conf *mdev);
 extern void drbd_bm_clear_all(struct drbd_conf *mdev);
@@ -1383,7 +1432,7 @@
 extern char *ppsize(char *buf, unsigned long long size);
 extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
 enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
-extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, int force) __must_hold(local);
+extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
 extern void resync_after_online_grow(struct drbd_conf *);
 extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
 extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
@@ -1414,7 +1463,8 @@
 }
 
 
-extern void drbd_csum(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
+extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
+extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *, struct drbd_epoch_entry *, void *);
 /* worker callbacks */
 extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int);
 extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int);
@@ -1438,6 +1488,8 @@
 extern void resync_timer_fn(unsigned long data);
 
 /* drbd_receiver.c */
+extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
+		const unsigned rw, const int fault_type);
 extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
 extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
 					    u64 id,
@@ -1593,6 +1645,41 @@
  * inline helper functions
  *************************/
 
+/* see also page_chain_add and friends in drbd_receiver.c */
+static inline struct page *page_chain_next(struct page *page)
+{
+	return (struct page *)page_private(page);
+}
+#define page_chain_for_each(page) \
+	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
+			page = page_chain_next(page))
+#define page_chain_for_each_safe(page, n) \
+	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
+
+static inline int drbd_bio_has_active_page(struct bio *bio)
+{
+	struct bio_vec *bvec;
+	int i;
+
+	__bio_for_each_segment(bvec, bio, i, 0) {
+		if (page_count(bvec->bv_page) > 1)
+			return 1;
+	}
+
+	return 0;
+}
+
+static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
+{
+	struct page *page = e->pages;
+	page_chain_for_each(page) {
+		if (page_count(page) > 1)
+			return 1;
+	}
+	return 0;
+}
+
+
 static inline void drbd_state_lock(struct drbd_conf *mdev)
 {
 	wait_event(mdev->misc_wait,
@@ -2132,13 +2219,15 @@
 		return 0;
 	if (test_bit(BITMAP_IO, &mdev->flags))
 		return 0;
+	if (atomic_read(&mdev->new_c_uuid))
+		return 0;
 	return 1;
 }
 
 /* I'd like to use wait_event_lock_irq,
  * but I'm not sure when it got introduced,
  * and not sure when it has 3 or 4 arguments */
-static inline void inc_ap_bio(struct drbd_conf *mdev, int one_or_two)
+static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
 {
 	/* compare with after_state_ch,
 	 * os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */
@@ -2152,6 +2241,9 @@
 	 * to avoid races with the reconnect code,
 	 * we need to atomic_inc within the spinlock. */
 
+	if (atomic_read(&mdev->new_c_uuid) && atomic_add_unless(&mdev->new_c_uuid, -1, 1))
+		drbd_queue_work_front(&mdev->data.work, &mdev->uuid_work);
+
 	spin_lock_irq(&mdev->req_lock);
 	while (!__inc_ap_bio_cond(mdev)) {
 		prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
@@ -2160,7 +2252,7 @@
 		finish_wait(&mdev->misc_wait, &wait);
 		spin_lock_irq(&mdev->req_lock);
 	}
-	atomic_add(one_or_two, &mdev->ap_bio_cnt);
+	atomic_add(count, &mdev->ap_bio_cnt);
 	spin_unlock_irq(&mdev->req_lock);
 }
 
@@ -2251,7 +2343,8 @@
 	if (test_bit(MD_NO_BARRIER, &mdev->flags))
 		return;
 
-	r = blkdev_issue_flush(mdev->ldev->md_bdev, NULL);
+	r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL,
+			BLKDEV_IFL_WAIT);
 	if (r) {
 		set_bit(MD_NO_BARRIER, &mdev->flags);
 		dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
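The drbd_int.h hunks above replace the per-entry bio with a page chain: pages are linked through page->private and walked with page_chain_for_each() until the NULL terminator. Below is a minimal userspace sketch of that idiom only; struct fake_page, chain_next() and the main() harness are illustrative stand-ins, not the kernel's struct page or any DRBD code.

/* illustrative userspace model of the page->private chain, not kernel code */
#include <stdio.h>
#include <stdlib.h>

struct fake_page {
	unsigned long private;		/* holds the next pointer, like page->private */
	int id;
};

static struct fake_page *chain_next(struct fake_page *p)
{
	return (struct fake_page *)p->private;
}

#define chain_for_each(p) \
	for (; (p) != NULL; (p) = chain_next(p))

int main(void)
{
	struct fake_page *head = NULL, *p;
	int i;

	for (i = 0; i < 4; i++) {	/* push four elements onto the chain */
		p = malloc(sizeof(*p));
		p->id = i;
		p->private = (unsigned long)head;
		head = p;
	}
	p = head;
	chain_for_each(p)
		printf("page %d\n", p->id);	/* prints 3 2 1 0 */
	return 0;
}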
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 93d1f9b..be2d2da 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -684,6 +684,9 @@
 	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
 		rv = SS_NO_REMOTE_DISK;
 
+	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
+		rv = SS_NO_UP_TO_DATE_DISK;
+
 	else if ((ns.conn == C_CONNECTED ||
 		  ns.conn == C_WF_BITMAP_S ||
 		  ns.conn == C_SYNC_SOURCE ||
@@ -840,7 +843,12 @@
 			break;
 		case C_WF_BITMAP_S:
 		case C_PAUSED_SYNC_S:
-			ns.pdsk = D_OUTDATED;
+			/* remap any consistent state to D_OUTDATED,
+			 * but disallow "upgrade" of not even consistent states.
+			 */
+			ns.pdsk =
+				(D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED)
+				? os.pdsk : D_OUTDATED;
 			break;
 		case C_SYNC_SOURCE:
 			ns.pdsk = D_INCONSISTENT;
@@ -1205,21 +1213,20 @@
 	&&  (ns.pdsk < D_INCONSISTENT ||
 	     ns.pdsk == D_UNKNOWN ||
 	     ns.pdsk == D_OUTDATED)) {
-		kfree(mdev->p_uuid);
-		mdev->p_uuid = NULL;
 		if (get_ldev(mdev)) {
 			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
-			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
-				drbd_uuid_new_current(mdev);
-				drbd_send_uuids(mdev);
-			}
+			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE &&
+			    !atomic_read(&mdev->new_c_uuid))
+				atomic_set(&mdev->new_c_uuid, 2);
 			put_ldev(mdev);
 		}
 	}
 
 	if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
-		if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0)
-			drbd_uuid_new_current(mdev);
+		/* Diskless peer becomes primary or got connected to diskless, primary peer. */
+		if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0 &&
+		    !atomic_read(&mdev->new_c_uuid))
+			atomic_set(&mdev->new_c_uuid, 2);
 
 		/* D_DISKLESS Peer becomes secondary */
 		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
@@ -1232,7 +1239,7 @@
 	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
 		kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */
 		mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */
-		drbd_send_sizes(mdev, 0);  /* to start sync... */
+		drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
 		drbd_send_uuids(mdev);
 		drbd_send_state(mdev);
 	}
@@ -1343,6 +1350,24 @@
 	drbd_md_sync(mdev);
 }
 
+static int w_new_current_uuid(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+{
+	if (get_ldev(mdev)) {
+		if (mdev->ldev->md.uuid[UI_BITMAP] == 0) {
+			drbd_uuid_new_current(mdev);
+			if (get_net_conf(mdev)) {
+				drbd_send_uuids(mdev);
+				put_net_conf(mdev);
+			}
+			drbd_md_sync(mdev);
+		}
+		put_ldev(mdev);
+	}
+	atomic_dec(&mdev->new_c_uuid);
+	wake_up(&mdev->misc_wait);
+
+	return 1;
+}
 
 static int drbd_thread_setup(void *arg)
 {
@@ -1755,7 +1780,7 @@
 			     (struct p_header *)&p, sizeof(p));
 }
 
-int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply)
+int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
 {
 	struct p_sizes p;
 	sector_t d_size, u_size;
@@ -1767,7 +1792,6 @@
 		d_size = drbd_get_max_capacity(mdev->ldev);
 		u_size = mdev->ldev->dc.disk_size;
 		q_order_type = drbd_queue_order_type(mdev);
-		p.queue_order_type = cpu_to_be32(drbd_queue_order_type(mdev));
 		put_ldev(mdev);
 	} else {
 		d_size = 0;
@@ -1779,7 +1803,8 @@
 	p.u_size = cpu_to_be64(u_size);
 	p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
 	p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue));
-	p.queue_order_type = cpu_to_be32(q_order_type);
+	p.queue_order_type = cpu_to_be16(q_order_type);
+	p.dds_flags = cpu_to_be16(flags);
 
 	ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
 			   (struct p_header *)&p, sizeof(p));
@@ -2180,6 +2205,43 @@
 	return ok;
 }
 
+static int drbd_send_delay_probe(struct drbd_conf *mdev, struct drbd_socket *ds)
+{
+	struct p_delay_probe dp;
+	int offset, ok = 0;
+	struct timeval now;
+
+	mutex_lock(&ds->mutex);
+	if (likely(ds->socket)) {
+		do_gettimeofday(&now);
+		offset = now.tv_usec - mdev->dps_time.tv_usec +
+			 (now.tv_sec - mdev->dps_time.tv_sec) * 1000000;
+		dp.seq_num  = cpu_to_be32(mdev->delay_seq);
+		dp.offset   = cpu_to_be32(offset);
+
+		ok = _drbd_send_cmd(mdev, ds->socket, P_DELAY_PROBE,
+				    (struct p_header *)&dp, sizeof(dp), 0);
+	}
+	mutex_unlock(&ds->mutex);
+
+	return ok;
+}
+
+static int drbd_send_delay_probes(struct drbd_conf *mdev)
+{
+	int ok;
+
+	mdev->delay_seq++;
+	do_gettimeofday(&mdev->dps_time);
+	ok = drbd_send_delay_probe(mdev, &mdev->meta);
+	ok = ok && drbd_send_delay_probe(mdev, &mdev->data);
+
+	mdev->dp_volume_last = mdev->send_cnt;
+	mod_timer(&mdev->delay_probe_timer, jiffies + mdev->sync_conf.dp_interval * HZ / 10);
+
+	return ok;
+}
+
 /* called on sndtimeo
  * returns FALSE if we should retry,
  * TRUE if we think connection is dead
@@ -2309,6 +2371,44 @@
 	return 1;
 }
 
+static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
+{
+	struct page *page = e->pages;
+	unsigned len = e->size;
+	page_chain_for_each(page) {
+		unsigned l = min_t(unsigned, len, PAGE_SIZE);
+		if (!_drbd_send_page(mdev, page, 0, l))
+			return 0;
+		len -= l;
+	}
+	return 1;
+}
+
+static void consider_delay_probes(struct drbd_conf *mdev)
+{
+	if (mdev->state.conn != C_SYNC_SOURCE || mdev->agreed_pro_version < 93)
+		return;
+
+	if (mdev->dp_volume_last + mdev->sync_conf.dp_volume * 2 < mdev->send_cnt)
+		drbd_send_delay_probes(mdev);
+}
+
+static int w_delay_probes(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+{
+	if (!cancel && mdev->state.conn == C_SYNC_SOURCE)
+		drbd_send_delay_probes(mdev);
+
+	return 1;
+}
+
+static void delay_probe_timer_fn(unsigned long data)
+{
+	struct drbd_conf *mdev = (struct drbd_conf *) data;
+
+	if (list_empty(&mdev->delay_probe_work.list))
+		drbd_queue_work(&mdev->data.work, &mdev->delay_probe_work);
+}
+
 /* Used to send write requests
  * R_PRIMARY -> Peer	(P_DATA)
  */
@@ -2360,7 +2460,7 @@
 		drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE));
 	if (ok && dgs) {
 		dgb = mdev->int_dig_out;
-		drbd_csum(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
+		drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
 		ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE);
 	}
 	if (ok) {
@@ -2371,6 +2471,10 @@
 	}
 
 	drbd_put_data_sock(mdev);
+
+	if (ok)
+		consider_delay_probes(mdev);
+
 	return ok;
 }
 
@@ -2409,13 +2513,17 @@
 					sizeof(p), MSG_MORE);
 	if (ok && dgs) {
 		dgb = mdev->int_dig_out;
-		drbd_csum(mdev, mdev->integrity_w_tfm, e->private_bio, dgb);
+		drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
 		ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE);
 	}
 	if (ok)
-		ok = _drbd_send_zc_bio(mdev, e->private_bio);
+		ok = _drbd_send_zc_ee(mdev, e);
 
 	drbd_put_data_sock(mdev);
+
+	if (ok)
+		consider_delay_probes(mdev);
+
 	return ok;
 }
 
@@ -2600,6 +2708,7 @@
 	atomic_set(&mdev->net_cnt, 0);
 	atomic_set(&mdev->packet_seq, 0);
 	atomic_set(&mdev->pp_in_use, 0);
+	atomic_set(&mdev->new_c_uuid, 0);
 
 	mutex_init(&mdev->md_io_mutex);
 	mutex_init(&mdev->data.mutex);
@@ -2628,16 +2737,26 @@
 	INIT_LIST_HEAD(&mdev->unplug_work.list);
 	INIT_LIST_HEAD(&mdev->md_sync_work.list);
 	INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
+	INIT_LIST_HEAD(&mdev->delay_probes);
+	INIT_LIST_HEAD(&mdev->delay_probe_work.list);
+	INIT_LIST_HEAD(&mdev->uuid_work.list);
+
 	mdev->resync_work.cb  = w_resync_inactive;
 	mdev->unplug_work.cb  = w_send_write_hint;
 	mdev->md_sync_work.cb = w_md_sync;
 	mdev->bm_io_work.w.cb = w_bitmap_io;
+	mdev->delay_probe_work.cb = w_delay_probes;
+	mdev->uuid_work.cb = w_new_current_uuid;
 	init_timer(&mdev->resync_timer);
 	init_timer(&mdev->md_sync_timer);
+	init_timer(&mdev->delay_probe_timer);
 	mdev->resync_timer.function = resync_timer_fn;
 	mdev->resync_timer.data = (unsigned long) mdev;
 	mdev->md_sync_timer.function = md_sync_timer_fn;
 	mdev->md_sync_timer.data = (unsigned long) mdev;
+	mdev->delay_probe_timer.function = delay_probe_timer_fn;
+	mdev->delay_probe_timer.data = (unsigned long) mdev;
+
 
 	init_waitqueue_head(&mdev->misc_wait);
 	init_waitqueue_head(&mdev->state_wait);
@@ -2680,7 +2799,7 @@
 	drbd_set_my_capacity(mdev, 0);
 	if (mdev->bitmap) {
 		/* maybe never allocated. */
-		drbd_bm_resize(mdev, 0);
+		drbd_bm_resize(mdev, 0, 1);
 		drbd_bm_cleanup(mdev);
 	}
 
@@ -3129,7 +3248,7 @@
 	if (err)
 		goto Enomem;
 
-	drbd_proc = proc_create("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops);
+	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
 	if (!drbd_proc)	{
 		printk(KERN_ERR "drbd: unable to register proc file\n");
 		goto Enomem;
@@ -3660,7 +3779,8 @@
 		[DRBD_FAULT_DT_RD] = "Data read",
 		[DRBD_FAULT_DT_RA] = "Data read ahead",
 		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
-		[DRBD_FAULT_AL_EE] = "EE allocation"
+		[DRBD_FAULT_AL_EE] = "EE allocation",
+		[DRBD_FAULT_RECEIVE] = "receive data corruption",
 	};
 
 	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
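The delay-probe machinery added above sends a probe with the same sequence number first on the meta socket and then on the data socket, each carrying the microseconds elapsed since dps_time; the receiver compares the two arrival times to estimate how far the data socket lags behind. A hedged userspace sketch of just the offset arithmetic (gettimeofday()-based; elapsed_us() is a made-up helper, not part of the patch):

/* illustrative only: the offset arithmetic used for delay probes */
#include <stdio.h>
#include <sys/time.h>

static long elapsed_us(const struct timeval *start, const struct timeval *now)
{
	return (now->tv_usec - start->tv_usec) +
	       (now->tv_sec - start->tv_sec) * 1000000L;
}

int main(void)
{
	struct timeval start, now;

	gettimeofday(&start, NULL);
	/* ... probe sent on the meta socket, then on the data socket ... */
	gettimeofday(&now, NULL);
	printf("offset: %ld us\n", elapsed_us(&start, &now));
	return 0;
}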
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 6429d2b..632e324 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -510,7 +510,7 @@
  * Returns 0 on success, negative return values indicate errors.
  * You should call drbd_md_sync() after calling this function.
  */
-enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force) __must_hold(local)
+enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
 {
 	sector_t prev_first_sect, prev_size; /* previous meta location */
 	sector_t la_size;
@@ -541,12 +541,12 @@
 	/* TODO: should only be some assert here, not (re)init... */
 	drbd_md_set_sector_offsets(mdev, mdev->ldev);
 
-	size = drbd_new_dev_size(mdev, mdev->ldev, force);
+	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);
 
 	if (drbd_get_capacity(mdev->this_bdev) != size ||
 	    drbd_bm_capacity(mdev) != size) {
 		int err;
-		err = drbd_bm_resize(mdev, size);
+		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
 		if (unlikely(err)) {
 			/* currently there is only one error: ENOMEM! */
 			size = drbd_bm_capacity(mdev)>>1;
@@ -704,9 +704,6 @@
 	struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
 	int max_segments = mdev->ldev->dc.max_bio_bvecs;
 
-	if (b->merge_bvec_fn && !mdev->ldev->dc.use_bmbv)
-		max_seg_s = PAGE_SIZE;
-
 	max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
 
 	blk_queue_max_hw_sectors(q, max_seg_s >> 9);
@@ -1199,13 +1196,12 @@
 	}
 
 	/* allocation not in the IO path, cqueue thread context */
-	new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
+	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
 	if (!new_conf) {
 		retcode = ERR_NOMEM;
 		goto fail;
 	}
 
-	memset(new_conf, 0, sizeof(struct net_conf));
 	new_conf->timeout	   = DRBD_TIMEOUT_DEF;
 	new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
 	new_conf->ping_int	   = DRBD_PING_INT_DEF;
@@ -1477,8 +1473,8 @@
 {
 	struct resize rs;
 	int retcode = NO_ERROR;
-	int ldsc = 0; /* local disk size changed */
 	enum determine_dev_size dd;
+	enum dds_flags ddsf;
 
 	memset(&rs, 0, sizeof(struct resize));
 	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
@@ -1502,13 +1498,17 @@
 		goto fail;
 	}
 
-	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
-		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
-		ldsc = 1;
+	if (rs.no_resync && mdev->agreed_pro_version < 93) {
+		retcode = ERR_NEED_APV_93;
+		goto fail;
 	}
 
+	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
+		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
+
 	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
-	dd = drbd_determin_dev_size(mdev, rs.resize_force);
+	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
+	dd = drbd_determin_dev_size(mdev, ddsf);
 	drbd_md_sync(mdev);
 	put_ldev(mdev);
 	if (dd == dev_size_error) {
@@ -1516,12 +1516,12 @@
 		goto fail;
 	}
 
-	if (mdev->state.conn == C_CONNECTED && (dd != unchanged || ldsc)) {
+	if (mdev->state.conn == C_CONNECTED) {
 		if (dd == grew)
 			set_bit(RESIZE_PENDING, &mdev->flags);
 
 		drbd_send_uuids(mdev);
-		drbd_send_sizes(mdev, 1);
+		drbd_send_sizes(mdev, 1, ddsf);
 	}
 
  fail:
@@ -1551,6 +1551,10 @@
 		sc.rate       = DRBD_RATE_DEF;
 		sc.after      = DRBD_AFTER_DEF;
 		sc.al_extents = DRBD_AL_EXTENTS_DEF;
+		sc.dp_volume  = DRBD_DP_VOLUME_DEF;
+		sc.dp_interval = DRBD_DP_INTERVAL_DEF;
+		sc.throttle_th = DRBD_RS_THROTTLE_TH_DEF;
+		sc.hold_off_th = DRBD_RS_HOLD_OFF_TH_DEF;
 	} else
 		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
 
@@ -2207,9 +2211,9 @@
 {
 	struct cn_msg *cn_reply;
 	struct drbd_nl_cfg_reply *reply;
-	struct bio_vec *bvec;
 	unsigned short *tl;
-	int i;
+	struct page *page;
+	unsigned len;
 
 	if (!e)
 		return;
@@ -2247,11 +2251,15 @@
 	put_unaligned(T_ee_data, tl++);
 	put_unaligned(e->size, tl++);
 
-	__bio_for_each_segment(bvec, e->private_bio, i, 0) {
-		void *d = kmap(bvec->bv_page);
-		memcpy(tl, d + bvec->bv_offset, bvec->bv_len);
-		kunmap(bvec->bv_page);
-		tl=(unsigned short*)((char*)tl + bvec->bv_len);
+	len = e->size;
+	page = e->pages;
+	page_chain_for_each(page) {
+		void *d = kmap_atomic(page, KM_USER0);
+		unsigned l = min_t(unsigned, len, PAGE_SIZE);
+		memcpy(tl, d, l);
+		kunmap_atomic(d, KM_USER0);
+		tl = (unsigned short*)((char*)tl + l);
+		len -= l;
 	}
 	put_unaligned(TT_END, tl++); /* Close the tag list */
 
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index be3374b..d0f1767 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -73,14 +73,21 @@
 	seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10);
 	/* if more than 1 GB display in MB */
 	if (mdev->rs_total > 0x100000L)
-		seq_printf(seq, "(%lu/%lu)M\n\t",
+		seq_printf(seq, "(%lu/%lu)M",
 			    (unsigned long) Bit2KB(rs_left >> 10),
 			    (unsigned long) Bit2KB(mdev->rs_total >> 10));
 	else
-		seq_printf(seq, "(%lu/%lu)K\n\t",
+		seq_printf(seq, "(%lu/%lu)K",
 			    (unsigned long) Bit2KB(rs_left),
 			    (unsigned long) Bit2KB(mdev->rs_total));
 
+	if (mdev->state.conn == C_SYNC_TARGET)
+		seq_printf(seq, " queue_delay: %d.%d ms\n\t",
+			   mdev->data_delay / 1000,
+			   (mdev->data_delay % 1000) / 100);
+	else if (mdev->state.conn == C_SYNC_SOURCE)
+		seq_printf(seq, " delay_probe: %u\n\t", mdev->delay_seq);
+
 	/* see drivers/md/md.c
 	 * We do not want to overflow, so the order of operands and
 	 * the * 100 / 100 trick are important. We do a +1 to be
@@ -128,6 +135,14 @@
 	else
 		seq_printf(seq, " (%ld)", dbdt);
 
+	if (mdev->state.conn == C_SYNC_TARGET) {
+		if (mdev->c_sync_rate > 1000)
+			seq_printf(seq, " want: %d,%03d",
+				   mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000);
+		else
+			seq_printf(seq, " want: %d", mdev->c_sync_rate);
+	}
+
 	seq_printf(seq, " K/sec\n");
 }
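The /proc/drbd changes above keep data_delay in microseconds and c_sync_rate in KB/s internally and only convert at print time: milliseconds with one decimal place for the queue delay, a thousands separator for the wanted rate. The formatting arithmetic in isolation (plain userspace C, values are examples):

/* illustrative only: the /proc/drbd formatting arithmetic with example values */
#include <stdio.h>

int main(void)
{
	int data_delay = 12345;		/* microseconds, as kept in mdev->data_delay */
	int c_sync_rate = 25400;	/* KB/s, as kept in mdev->c_sync_rate */

	/* prints " queue_delay: 12.3 ms" */
	printf(" queue_delay: %d.%d ms\n",
	       data_delay / 1000, (data_delay % 1000) / 100);
	/* prints " want: 25,400" */
	printf(" want: %d,%03d\n", c_sync_rate / 1000, c_sync_rate % 1000);
	return 0;
}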
 
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 3f096e79..bc9ab7f 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -80,30 +80,128 @@
 
 #define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
 
-static struct page *drbd_pp_first_page_or_try_alloc(struct drbd_conf *mdev)
+/*
+ * some helper functions to deal with singly linked page lists,
+ * page->private being our "next" pointer.
+ */
+
+/* If at least n pages are linked at head, get n pages off.
+ * Otherwise, don't modify head, and return NULL.
+ * Locking is the responsibility of the caller.
+ */
+static struct page *page_chain_del(struct page **head, int n)
+{
+	struct page *page;
+	struct page *tmp;
+
+	BUG_ON(!n);
+	BUG_ON(!head);
+
+	page = *head;
+
+	if (!page)
+		return NULL;
+
+	while (page) {
+		tmp = page_chain_next(page);
+		if (--n == 0)
+			break; /* found sufficient pages */
+		if (tmp == NULL)
+			/* insufficient pages, don't use any of them. */
+			return NULL;
+		page = tmp;
+	}
+
+	/* add end of list marker for the returned list */
+	set_page_private(page, 0);
+	/* actual return value, and adjustment of head */
+	page = *head;
+	*head = tmp;
+	return page;
+}
+
+/* may be used outside of locks to find the tail of a (usually short)
+ * "private" page chain, before adding it back to a global chain head
+ * with page_chain_add() under a spinlock. */
+static struct page *page_chain_tail(struct page *page, int *len)
+{
+	struct page *tmp;
+	int i = 1;
+	while ((tmp = page_chain_next(page)))
+		++i, page = tmp;
+	if (len)
+		*len = i;
+	return page;
+}
+
+static int page_chain_free(struct page *page)
+{
+	struct page *tmp;
+	int i = 0;
+	page_chain_for_each_safe(page, tmp) {
+		put_page(page);
+		++i;
+	}
+	return i;
+}
+
+static void page_chain_add(struct page **head,
+		struct page *chain_first, struct page *chain_last)
+{
+#if 1
+	struct page *tmp;
+	tmp = page_chain_tail(chain_first, NULL);
+	BUG_ON(tmp != chain_last);
+#endif
+
+	/* add chain to head */
+	set_page_private(chain_last, (unsigned long)*head);
+	*head = chain_first;
+}
+
+static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
 {
 	struct page *page = NULL;
+	struct page *tmp = NULL;
+	int i = 0;
 
 	/* Yes, testing drbd_pp_vacant outside the lock is racy.
 	 * So what. It saves a spin_lock. */
-	if (drbd_pp_vacant > 0) {
+	if (drbd_pp_vacant >= number) {
 		spin_lock(&drbd_pp_lock);
-		page = drbd_pp_pool;
-		if (page) {
-			drbd_pp_pool = (struct page *)page_private(page);
-			set_page_private(page, 0); /* just to be polite */
-			drbd_pp_vacant--;
-		}
+		page = page_chain_del(&drbd_pp_pool, number);
+		if (page)
+			drbd_pp_vacant -= number;
 		spin_unlock(&drbd_pp_lock);
+		if (page)
+			return page;
 	}
+
 	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
 	 * which in turn might block on the other node at this very place.  */
-	if (!page)
-		page = alloc_page(GFP_TRY);
-	if (page)
-		atomic_inc(&mdev->pp_in_use);
-	return page;
+	for (i = 0; i < number; i++) {
+		tmp = alloc_page(GFP_TRY);
+		if (!tmp)
+			break;
+		set_page_private(tmp, (unsigned long)page);
+		page = tmp;
+	}
+
+	if (i == number)
+		return page;
+
+	/* Not enough pages immediately available this time.
+	 * No need to jump around here, drbd_pp_alloc will retry this
+	 * function "soon". */
+	if (page) {
+		tmp = page_chain_tail(page, NULL);
+		spin_lock(&drbd_pp_lock);
+		page_chain_add(&drbd_pp_pool, page, tmp);
+		drbd_pp_vacant += i;
+		spin_unlock(&drbd_pp_lock);
+	}
+	return NULL;
 }
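page_chain_del() above is deliberately all-or-nothing: it detaches exactly n pages from the pool head or, if fewer are available, returns NULL and leaves the pool untouched, so drbd_pp_first_pages_or_try_alloc() never hands out a short chain. A userspace sketch of that contract, reusing the fake_page stand-in from the earlier sketch (not kernel code):

/* illustrative only: the all-or-nothing detach contract of page_chain_del() */
#include <stdio.h>

struct fake_page {
	unsigned long private;		/* next pointer, like page->private */
};

static struct fake_page *chain_next(struct fake_page *p)
{
	return (struct fake_page *)p->private;
}

/* detach exactly n elements from *head, or return NULL and change nothing */
static struct fake_page *chain_del(struct fake_page **head, int n)
{
	struct fake_page *page = *head, *tmp = NULL;

	if (!page)
		return NULL;
	while (page) {
		tmp = chain_next(page);
		if (--n == 0)
			break;		/* found enough elements */
		if (!tmp)
			return NULL;	/* too few: take none of them */
		page = tmp;
	}
	page->private = 0;		/* terminate the detached chain */
	page = *head;
	*head = tmp;			/* remainder becomes the new head */
	return page;
}

int main(void)
{
	struct fake_page pool[3] = { { 0 }, { 0 }, { 0 } };
	struct fake_page *head = &pool[0], *got;

	pool[0].private = (unsigned long)&pool[1];
	pool[1].private = (unsigned long)&pool[2];

	got = chain_del(&head, 2);	/* succeeds: two pages detached */
	printf("got=%p head=%p\n", (void *)got, (void *)head);
	got = chain_del(&head, 2);	/* fails: only one page left */
	printf("got=%p head=%p\n", (void *)got, (void *)head);
	return 0;
}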
 
 /* kick lower level device, if we have more than (arbitrary number)
@@ -127,7 +225,7 @@
 
 	list_for_each_safe(le, tle, &mdev->net_ee) {
 		e = list_entry(le, struct drbd_epoch_entry, w.list);
-		if (drbd_bio_has_active_page(e->private_bio))
+		if (drbd_ee_has_active_page(e))
 			break;
 		list_move(le, to_be_freed);
 	}
@@ -148,32 +246,34 @@
 }
 
 /**
- * drbd_pp_alloc() - Returns a page, fails only if a signal comes in
+ * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
  * @mdev:	DRBD device.
- * @retry:	whether or not to retry allocation forever (or until signalled)
+ * @number:	number of pages requested
+ * @retry:	whether to retry, if not enough pages are available right now
  *
- * Tries to allocate a page, first from our own page pool, then from the
- * kernel, unless this allocation would exceed the max_buffers setting.
- * If @retry is non-zero, retry until DRBD frees a page somewhere else.
+ * Tries to allocate @number pages, first from our own page pool, then from
+ * the kernel, unless this allocation would exceed the max_buffers setting.
+ * Possibly retry until DRBD frees sufficient pages somewhere else.
+ *
+ * Returns a page chain linked via page->private.
  */
-static struct page *drbd_pp_alloc(struct drbd_conf *mdev, int retry)
+static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
 {
 	struct page *page = NULL;
 	DEFINE_WAIT(wait);
 
-	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
-		page = drbd_pp_first_page_or_try_alloc(mdev);
-		if (page)
-			return page;
-	}
+	/* Yes, we may run up to @number over max_buffers. If we
+	 * follow it strictly, the admin will get it wrong anyways. */
+	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
+		page = drbd_pp_first_pages_or_try_alloc(mdev, number);
 
-	for (;;) {
+	while (page == NULL) {
 		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
 
 		drbd_kick_lo_and_reclaim_net(mdev);
 
 		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
-			page = drbd_pp_first_page_or_try_alloc(mdev);
+			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
 			if (page)
 				break;
 		}
@@ -190,62 +290,32 @@
 	}
 	finish_wait(&drbd_pp_wait, &wait);
 
+	if (page)
+		atomic_add(number, &mdev->pp_in_use);
 	return page;
 }
 
 /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
- * Is also used from inside an other spin_lock_irq(&mdev->req_lock) */
+ * Is also used from inside another spin_lock_irq(&mdev->req_lock);
+ * Either links the page chain back to the global pool,
+ * or returns all pages to the system. */
 static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
 {
-	int free_it;
-
-	spin_lock(&drbd_pp_lock);
-	if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
-		free_it = 1;
-	} else {
-		set_page_private(page, (unsigned long)drbd_pp_pool);
-		drbd_pp_pool = page;
-		drbd_pp_vacant++;
-		free_it = 0;
-	}
-	spin_unlock(&drbd_pp_lock);
-
-	atomic_dec(&mdev->pp_in_use);
-
-	if (free_it)
-		__free_page(page);
-
-	wake_up(&drbd_pp_wait);
-}
-
-static void drbd_pp_free_bio_pages(struct drbd_conf *mdev, struct bio *bio)
-{
-	struct page *p_to_be_freed = NULL;
-	struct page *page;
-	struct bio_vec *bvec;
 	int i;
-
-	spin_lock(&drbd_pp_lock);
-	__bio_for_each_segment(bvec, bio, i, 0) {
-		if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
-			set_page_private(bvec->bv_page, (unsigned long)p_to_be_freed);
-			p_to_be_freed = bvec->bv_page;
-		} else {
-			set_page_private(bvec->bv_page, (unsigned long)drbd_pp_pool);
-			drbd_pp_pool = bvec->bv_page;
-			drbd_pp_vacant++;
-		}
+	if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
+		i = page_chain_free(page);
+	else {
+		struct page *tmp;
+		tmp = page_chain_tail(page, &i);
+		spin_lock(&drbd_pp_lock);
+		page_chain_add(&drbd_pp_pool, page, tmp);
+		drbd_pp_vacant += i;
+		spin_unlock(&drbd_pp_lock);
 	}
-	spin_unlock(&drbd_pp_lock);
-	atomic_sub(bio->bi_vcnt, &mdev->pp_in_use);
-
-	while (p_to_be_freed) {
-		page = p_to_be_freed;
-		p_to_be_freed = (struct page *)page_private(page);
-		set_page_private(page, 0); /* just to be polite */
-		put_page(page);
-	}
-
+	atomic_sub(i, &mdev->pp_in_use);
+	i = atomic_read(&mdev->pp_in_use);
+	if (i < 0)
+		dev_warn(DEV, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
 	wake_up(&drbd_pp_wait);
 }
 
@@ -270,11 +340,9 @@
 				     unsigned int data_size,
 				     gfp_t gfp_mask) __must_hold(local)
 {
-	struct request_queue *q;
 	struct drbd_epoch_entry *e;
 	struct page *page;
-	struct bio *bio;
-	unsigned int ds;
+	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
 
 	if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
 		return NULL;
@@ -286,84 +354,32 @@
 		return NULL;
 	}
 
-	bio = bio_alloc(gfp_mask & ~__GFP_HIGHMEM, div_ceil(data_size, PAGE_SIZE));
-	if (!bio) {
-		if (!(gfp_mask & __GFP_NOWARN))
-			dev_err(DEV, "alloc_ee: Allocation of a bio failed\n");
-		goto fail1;
-	}
+	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+	if (!page)
+		goto fail;
 
-	bio->bi_bdev = mdev->ldev->backing_bdev;
-	bio->bi_sector = sector;
-
-	ds = data_size;
-	while (ds) {
-		page = drbd_pp_alloc(mdev, (gfp_mask & __GFP_WAIT));
-		if (!page) {
-			if (!(gfp_mask & __GFP_NOWARN))
-				dev_err(DEV, "alloc_ee: Allocation of a page failed\n");
-			goto fail2;
-		}
-		if (!bio_add_page(bio, page, min_t(int, ds, PAGE_SIZE), 0)) {
-			drbd_pp_free(mdev, page);
-			dev_err(DEV, "alloc_ee: bio_add_page(s=%llu,"
-			    "data_size=%u,ds=%u) failed\n",
-			    (unsigned long long)sector, data_size, ds);
-
-			q = bdev_get_queue(bio->bi_bdev);
-			if (q->merge_bvec_fn) {
-				struct bvec_merge_data bvm = {
-					.bi_bdev = bio->bi_bdev,
-					.bi_sector = bio->bi_sector,
-					.bi_size = bio->bi_size,
-					.bi_rw = bio->bi_rw,
-				};
-				int l = q->merge_bvec_fn(q, &bvm,
-						&bio->bi_io_vec[bio->bi_vcnt]);
-				dev_err(DEV, "merge_bvec_fn() = %d\n", l);
-			}
-
-			/* dump more of the bio. */
-			dev_err(DEV, "bio->bi_max_vecs = %d\n", bio->bi_max_vecs);
-			dev_err(DEV, "bio->bi_vcnt = %d\n", bio->bi_vcnt);
-			dev_err(DEV, "bio->bi_size = %d\n", bio->bi_size);
-			dev_err(DEV, "bio->bi_phys_segments = %d\n", bio->bi_phys_segments);
-
-			goto fail2;
-			break;
-		}
-		ds -= min_t(int, ds, PAGE_SIZE);
-	}
-
-	D_ASSERT(data_size == bio->bi_size);
-
-	bio->bi_private = e;
-	e->mdev = mdev;
-	e->sector = sector;
-	e->size = bio->bi_size;
-
-	e->private_bio = bio;
-	e->block_id = id;
 	INIT_HLIST_NODE(&e->colision);
 	e->epoch = NULL;
+	e->mdev = mdev;
+	e->pages = page;
+	atomic_set(&e->pending_bios, 0);
+	e->size = data_size;
 	e->flags = 0;
+	e->sector = sector;
+	e->block_id = id;
 
 	return e;
 
- fail2:
-	drbd_pp_free_bio_pages(mdev, bio);
-	bio_put(bio);
- fail1:
+ fail:
 	mempool_free(e, drbd_ee_mempool);
-
 	return NULL;
 }
 
 void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
 {
-	struct bio *bio = e->private_bio;
-	drbd_pp_free_bio_pages(mdev, bio);
-	bio_put(bio);
+	drbd_pp_free(mdev, e->pages);
+	D_ASSERT(atomic_read(&e->pending_bios) == 0);
 	D_ASSERT(hlist_unhashed(&e->colision));
 	mempool_free(e, drbd_ee_mempool);
 }
@@ -902,7 +918,7 @@
 	if (!drbd_send_protocol(mdev))
 		return -1;
 	drbd_send_sync_param(mdev, &mdev->sync_conf);
-	drbd_send_sizes(mdev, 0);
+	drbd_send_sizes(mdev, 0, 0);
 	drbd_send_uuids(mdev);
 	drbd_send_state(mdev);
 	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
@@ -946,7 +962,8 @@
 	int rv;
 
 	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
-		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, NULL);
+		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
+					NULL, BLKDEV_IFL_WAIT);
 		if (rv) {
 			dev_err(DEV, "local disk flush failed with status %d\n", rv);
 			/* would rather check on EOPNOTSUPP, but that is not reliable.
@@ -1120,6 +1137,101 @@
 }
 
 /**
+ * drbd_submit_ee() - submit the pages of an epoch entry as one or more bios
+ * @mdev:	DRBD device.
+ * @e:		epoch entry
+ * @rw:		flag field, see bio->bi_rw
+ */
+/* TODO allocate from our own bio_set. */
+int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
+		const unsigned rw, const int fault_type)
+{
+	struct bio *bios = NULL;
+	struct bio *bio;
+	struct page *page = e->pages;
+	sector_t sector = e->sector;
+	unsigned ds = e->size;
+	unsigned n_bios = 0;
+	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
+
+	if (atomic_read(&mdev->new_c_uuid)) {
+		if (atomic_add_unless(&mdev->new_c_uuid, -1, 1)) {
+			drbd_uuid_new_current(mdev);
+			drbd_md_sync(mdev);
+
+			atomic_dec(&mdev->new_c_uuid);
+			wake_up(&mdev->misc_wait);
+		}
+		wait_event(mdev->misc_wait, !atomic_read(&mdev->new_c_uuid));
+	}
+
+	/* In most cases, we will only need one bio.  But in case the lower
+	 * level restrictions happen to be different at this offset on this
+	 * side than those of the sending peer, we may need to submit the
+	 * request in more than one bio. */
+next_bio:
+	bio = bio_alloc(GFP_NOIO, nr_pages);
+	if (!bio) {
+		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
+		goto fail;
+	}
+	/* > e->sector, unless this is the first bio */
+	bio->bi_sector = sector;
+	bio->bi_bdev = mdev->ldev->backing_bdev;
+	/* we special case some flags in the multi-bio case, see below
+	 * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */
+	bio->bi_rw = rw;
+	bio->bi_private = e;
+	bio->bi_end_io = drbd_endio_sec;
+
+	bio->bi_next = bios;
+	bios = bio;
+	++n_bios;
+
+	page_chain_for_each(page) {
+		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
+		if (!bio_add_page(bio, page, len, 0)) {
+			/* a single page must always be possible! */
+			BUG_ON(bio->bi_vcnt == 0);
+			goto next_bio;
+		}
+		ds -= len;
+		sector += len >> 9;
+		--nr_pages;
+	}
+	D_ASSERT(page == NULL);
+	D_ASSERT(ds == 0);
+
+	atomic_set(&e->pending_bios, n_bios);
+	do {
+		bio = bios;
+		bios = bios->bi_next;
+		bio->bi_next = NULL;
+
+		/* strip off BIO_RW_UNPLUG unless it is the last bio */
+		if (bios)
+			bio->bi_rw &= ~(1<<BIO_RW_UNPLUG);
+
+		drbd_generic_make_request(mdev, fault_type, bio);
+
+		/* strip off BIO_RW_BARRIER,
+		 * unless it is the first or last bio */
+		if (bios && bios->bi_next)
+			bios->bi_rw &= ~(1<<BIO_RW_BARRIER);
+	} while (bios);
+	maybe_kick_lo(mdev);
+	return 0;
+
+fail:
+	while (bios) {
+		bio = bios;
+		bios = bios->bi_next;
+		bio_put(bio);
+	}
+	return -ENOMEM;
+}
+
+/**
  * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
  * @mdev:	DRBD device.
  * @w:		work object.
@@ -1128,8 +1240,6 @@
 int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
 {
 	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
-	struct bio *bio = e->private_bio;
-
 	/* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
 	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
 	   so that we can finish that epoch in drbd_may_finish_epoch().
@@ -1143,33 +1253,17 @@
 	if (previous_epoch(mdev, e->epoch))
 		dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
 
-	/* prepare bio for re-submit,
-	 * re-init volatile members */
 	/* we still have a local reference,
 	 * get_ldev was done in receive_Data. */
-	bio->bi_bdev = mdev->ldev->backing_bdev;
-	bio->bi_sector = e->sector;
-	bio->bi_size = e->size;
-	bio->bi_idx = 0;
-
-	bio->bi_flags &= ~(BIO_POOL_MASK - 1);
-	bio->bi_flags |= 1 << BIO_UPTODATE;
-
-	/* don't know whether this is necessary: */
-	bio->bi_phys_segments = 0;
-	bio->bi_next = NULL;
-
-	/* these should be unchanged: */
-	/* bio->bi_end_io = drbd_endio_write_sec; */
-	/* bio->bi_vcnt = whatever; */
 
 	e->w.cb = e_end_block;
-
-	/* This is no longer a barrier request. */
-	bio->bi_rw &= ~(1UL << BIO_RW_BARRIER);
-
-	drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, bio);
-
+	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
+		/* drbd_submit_ee fails for one reason only:
+		 * it was not able to allocate sufficient bios.
+		 * requeue, try again later. */
+		e->w.cb = w_e_reissue;
+		drbd_queue_work(&mdev->data.work, &e->w);
+	}
 	return 1;
 }
 
@@ -1261,13 +1355,13 @@
 static struct drbd_epoch_entry *
 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
 {
+	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
 	struct drbd_epoch_entry *e;
-	struct bio_vec *bvec;
 	struct page *page;
-	struct bio *bio;
-	int dgs, ds, i, rr;
+	int dgs, ds, rr;
 	void *dig_in = mdev->int_dig_in;
 	void *dig_vv = mdev->int_dig_vv;
+	unsigned long *data;
 
 	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
 		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
@@ -1286,29 +1380,44 @@
 	ERR_IF(data_size &  0x1ff) return NULL;
 	ERR_IF(data_size >  DRBD_MAX_SEGMENT_SIZE) return NULL;
 
+	/* even though we trust our peer,
+	 * we sometimes have to double check. */
+	if (sector + (data_size>>9) > capacity) {
+		dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
+			(unsigned long long)capacity,
+			(unsigned long long)sector, data_size);
+		return NULL;
+	}
+
 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
 	 * which in turn might block on the other node at this very place.  */
 	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
 	if (!e)
 		return NULL;
-	bio = e->private_bio;
+
 	ds = data_size;
-	bio_for_each_segment(bvec, bio, i) {
-		page = bvec->bv_page;
-		rr = drbd_recv(mdev, kmap(page), min_t(int, ds, PAGE_SIZE));
+	page = e->pages;
+	page_chain_for_each(page) {
+		unsigned len = min_t(int, ds, PAGE_SIZE);
+		data = kmap(page);
+		rr = drbd_recv(mdev, data, len);
+		if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
+			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
+			data[0] = data[0] ^ (unsigned long)-1;
+		}
 		kunmap(page);
-		if (rr != min_t(int, ds, PAGE_SIZE)) {
+		if (rr != len) {
 			drbd_free_ee(mdev, e);
 			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
-			     rr, min_t(int, ds, PAGE_SIZE));
+			     rr, len);
 			return NULL;
 		}
 		ds -= rr;
 	}
 
 	if (dgs) {
-		drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
+		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
 		if (memcmp(dig_in, dig_vv, dgs)) {
 			dev_err(DEV, "Digest integrity check FAILED.\n");
 			drbd_bcast_ee(mdev, "digest failed",
@@ -1330,7 +1439,10 @@
 	int rr, rv = 1;
 	void *data;
 
-	page = drbd_pp_alloc(mdev, 1);
+	if (!data_size)
+		return TRUE;
+
+	page = drbd_pp_alloc(mdev, 1, 1);
 
 	data = kmap(page);
 	while (data_size) {
@@ -1394,7 +1506,7 @@
 	}
 
 	if (dgs) {
-		drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
+		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
 		if (memcmp(dig_in, dig_vv, dgs)) {
 			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
 			return 0;
@@ -1415,7 +1527,7 @@
 
 	D_ASSERT(hlist_unhashed(&e->colision));
 
-	if (likely(drbd_bio_uptodate(e->private_bio))) {
+	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
 		drbd_set_in_sync(mdev, sector, e->size);
 		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
 	} else {
@@ -1434,30 +1546,28 @@
 	struct drbd_epoch_entry *e;
 
 	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
-	if (!e) {
-		put_ldev(mdev);
-		return FALSE;
-	}
+	if (!e)
+		goto fail;
 
 	dec_rs_pending(mdev);
 
-	e->private_bio->bi_end_io = drbd_endio_write_sec;
-	e->private_bio->bi_rw = WRITE;
-	e->w.cb = e_end_resync_block;
-
 	inc_unacked(mdev);
 	/* corresponding dec_unacked() in e_end_resync_block()
 	 * respective _drbd_clear_done_ee */
 
+	e->w.cb = e_end_resync_block;
+
 	spin_lock_irq(&mdev->req_lock);
 	list_add(&e->w.list, &mdev->sync_ee);
 	spin_unlock_irq(&mdev->req_lock);
 
-	drbd_generic_make_request(mdev, DRBD_FAULT_RS_WR, e->private_bio);
-	/* accounting done in endio */
+	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
+		return TRUE;
 
-	maybe_kick_lo(mdev);
-	return TRUE;
+	drbd_free_ee(mdev, e);
+fail:
+	put_ldev(mdev);
+	return FALSE;
 }
 
 static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h)
@@ -1552,7 +1662,7 @@
 	}
 
 	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
-		if (likely(drbd_bio_uptodate(e->private_bio))) {
+		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
 			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
 				mdev->state.conn <= C_PAUSED_SYNC_T &&
 				e->flags & EE_MAY_SET_IN_SYNC) ?
@@ -1698,7 +1808,6 @@
 		return FALSE;
 	}
 
-	e->private_bio->bi_end_io = drbd_endio_write_sec;
 	e->w.cb = e_end_block;
 
 	spin_lock(&mdev->epoch_lock);
@@ -1894,12 +2003,8 @@
 		drbd_al_begin_io(mdev, e->sector);
 	}
 
-	e->private_bio->bi_rw = rw;
-	drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, e->private_bio);
-	/* accounting done in endio */
-
-	maybe_kick_lo(mdev);
-	return TRUE;
+	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
+		return TRUE;
 
 out_interrupted:
 	/* yes, the epoch_size now is imbalanced.
@@ -1945,7 +2050,7 @@
 			    "no local data.\n");
 		drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY :
 				 P_NEG_RS_DREPLY , p);
-		return TRUE;
+		return drbd_drain_block(mdev, h->length - brps);
 	}
 
 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
@@ -1957,9 +2062,6 @@
 		return FALSE;
 	}
 
-	e->private_bio->bi_rw = READ;
-	e->private_bio->bi_end_io = drbd_endio_read_sec;
-
 	switch (h->command) {
 	case P_DATA_REQUEST:
 		e->w.cb = w_e_end_data_req;
@@ -2053,10 +2155,8 @@
 
 	inc_unacked(mdev);
 
-	drbd_generic_make_request(mdev, fault_type, e->private_bio);
-	maybe_kick_lo(mdev);
-
-	return TRUE;
+	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
+		return TRUE;
 
 out_free_e:
 	kfree(di);
@@ -2473,6 +2573,9 @@
 		     hg > 0 ? "source" : "target");
 	}
 
+	if (abs(hg) == 100)
+		drbd_khelper(mdev, "initial-split-brain");
+
 	if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
 		int pcount = (mdev->state.role == R_PRIMARY)
 			   + (peer_role == R_PRIMARY);
@@ -2518,7 +2621,7 @@
 		 * after an attempted attach on a diskless node.
 		 * We just refuse to attach -- well, we drop the "connection"
 		 * to that disk, in a way... */
-		dev_alert(DEV, "Split-Brain detected, dropping connection!\n");
+		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
 		drbd_khelper(mdev, "split-brain");
 		return C_MASK;
 	}
@@ -2849,7 +2952,7 @@
 	unsigned int max_seg_s;
 	sector_t p_size, p_usize, my_usize;
 	int ldsc = 0; /* local disk size changed */
-	enum drbd_conns nconn;
+	enum dds_flags ddsf;
 
 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
 	if (drbd_recv(mdev, h->payload, h->length) != h->length)
@@ -2905,8 +3008,9 @@
 	}
 #undef min_not_zero
 
+	ddsf = be16_to_cpu(p->dds_flags);
 	if (get_ldev(mdev)) {
-	  dd = drbd_determin_dev_size(mdev, 0);
+		dd = drbd_determin_dev_size(mdev, ddsf);
 		put_ldev(mdev);
 		if (dd == dev_size_error)
 			return FALSE;
@@ -2916,33 +3020,21 @@
 		drbd_set_my_capacity(mdev, p_size);
 	}
 
-	if (mdev->p_uuid && mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
-		nconn = drbd_sync_handshake(mdev,
-				mdev->state.peer, mdev->state.pdsk);
-		put_ldev(mdev);
-
-		if (nconn == C_MASK) {
-			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-			return FALSE;
-		}
-
-		if (drbd_request_state(mdev, NS(conn, nconn)) < SS_SUCCESS) {
-			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-			return FALSE;
-		}
-	}
-
 	if (get_ldev(mdev)) {
 		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
 			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
 			ldsc = 1;
 		}
 
-		max_seg_s = be32_to_cpu(p->max_segment_size);
+		if (mdev->agreed_pro_version < 94)
+			max_seg_s = be32_to_cpu(p->max_segment_size);
+		else /* drbd 8.3.8 onwards */
+			max_seg_s = DRBD_MAX_SEGMENT_SIZE;
+
 		if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
 			drbd_setup_queue_param(mdev, max_seg_s);
 
-		drbd_setup_order_type(mdev, be32_to_cpu(p->queue_order_type));
+		drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
 		put_ldev(mdev);
 	}
 
@@ -2951,14 +3043,17 @@
 		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
 			/* we have different sizes, probably peer
 			 * needs to know my new size... */
-			drbd_send_sizes(mdev, 0);
+			drbd_send_sizes(mdev, 0, ddsf);
 		}
 		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
 		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
 			if (mdev->state.pdsk >= D_INCONSISTENT &&
-			    mdev->state.disk >= D_INCONSISTENT)
-				resync_after_online_grow(mdev);
-			else
+			    mdev->state.disk >= D_INCONSISTENT) {
+				if (ddsf & DDSF_NO_RESYNC)
+					dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
+				else
+					resync_after_online_grow(mdev);
+			} else
 				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
 		}
 	}
@@ -3490,6 +3585,92 @@
 	return TRUE;
 }
 
+static void timeval_sub_us(struct timeval* tv, unsigned int us)
+{
+	tv->tv_sec -= us / 1000000;
+	us = us % 1000000;
+	if (tv->tv_usec > us) {
+		tv->tv_usec += 1000000;
+		tv->tv_sec--;
+	}
+	tv->tv_usec -= us;
+}
+
+static void got_delay_probe(struct drbd_conf *mdev, int from, struct p_delay_probe *p)
+{
+	struct delay_probe *dp;
+	struct list_head *le;
+	struct timeval now;
+	int seq_num;
+	int offset;
+	int data_delay;
+
+	seq_num = be32_to_cpu(p->seq_num);
+	offset  = be32_to_cpu(p->offset);
+
+	spin_lock(&mdev->peer_seq_lock);
+	if (!list_empty(&mdev->delay_probes)) {
+		if (from == USE_DATA_SOCKET)
+			le = mdev->delay_probes.next;
+		else
+			le = mdev->delay_probes.prev;
+
+		dp = list_entry(le, struct delay_probe, list);
+
+		if (dp->seq_num == seq_num) {
+			list_del(le);
+			spin_unlock(&mdev->peer_seq_lock);
+			do_gettimeofday(&now);
+			timeval_sub_us(&now, offset);
+			data_delay =
+				now.tv_usec - dp->time.tv_usec +
+				(now.tv_sec - dp->time.tv_sec) * 1000000;
+
+			if (data_delay > 0)
+				mdev->data_delay = data_delay;
+
+			kfree(dp);
+			return;
+		}
+
+		if (dp->seq_num > seq_num) {
+			spin_unlock(&mdev->peer_seq_lock);
+			dev_warn(DEV, "Previous allocation failure of struct delay_probe?\n");
+			return; /* Do not allocate a struct delay_probe... */
+		}
+	}
+	spin_unlock(&mdev->peer_seq_lock);
+
+	dp = kmalloc(sizeof(struct delay_probe), GFP_NOIO);
+	if (!dp) {
+		dev_warn(DEV, "Failed to allocate a struct delay_probe, do not worry.\n");
+		return;
+	}
+
+	dp->seq_num = seq_num;
+	do_gettimeofday(&dp->time);
+	timeval_sub_us(&dp->time, offset);
+
+	spin_lock(&mdev->peer_seq_lock);
+	if (from == USE_DATA_SOCKET)
+		list_add(&dp->list, &mdev->delay_probes);
+	else
+		list_add_tail(&dp->list, &mdev->delay_probes);
+	spin_unlock(&mdev->peer_seq_lock);
+}
+
+static int receive_delay_probe(struct drbd_conf *mdev, struct p_header *h)
+{
+	struct p_delay_probe *p = (struct p_delay_probe *)h;
+
+	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
+	if (drbd_recv(mdev, h->payload, h->length) != h->length)
+		return FALSE;
+
+	got_delay_probe(mdev, USE_DATA_SOCKET, p);
+	return TRUE;
+}
+
 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *);
 
 static drbd_cmd_handler_f drbd_default_handler[] = {
@@ -3513,6 +3694,7 @@
 	[P_OV_REQUEST]      = receive_DataRequest,
 	[P_OV_REPLY]        = receive_DataRequest,
 	[P_CSUM_RS_REQUEST]    = receive_DataRequest,
+	[P_DELAY_PROBE]     = receive_delay_probe,
 	/* anything missing from this table is in
 	 * the asender_tbl, see get_asender_cmd */
 	[P_MAX_CMD]	    = NULL,
@@ -3739,7 +3921,7 @@
 		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
 	i = atomic_read(&mdev->pp_in_use);
 	if (i)
-		dev_info(DEV, "pp_in_use = %u, expected 0\n", i);
+		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
 
 	D_ASSERT(list_empty(&mdev->read_ee));
 	D_ASSERT(list_empty(&mdev->active_ee));
@@ -4232,7 +4414,6 @@
 
 	sector = be64_to_cpu(p->sector);
 	size = be32_to_cpu(p->blksize);
-	D_ASSERT(p->block_id == ID_SYNCER);
 
 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
 
@@ -4290,6 +4471,14 @@
 	return TRUE;
 }
 
+static int got_delay_probe_m(struct drbd_conf *mdev, struct p_header *h)
+{
+	struct p_delay_probe *p = (struct p_delay_probe *)h;
+
+	got_delay_probe(mdev, USE_META_SOCKET, p);
+	return TRUE;
+}
+
 struct asender_cmd {
 	size_t pkt_size;
 	int (*process)(struct drbd_conf *mdev, struct p_header *h);
@@ -4314,6 +4503,7 @@
 	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
 	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
 	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
+	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe), got_delay_probe_m },
 	[P_MAX_CMD]	    = { 0, NULL },
 	};
 	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
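drbd_submit_ee() above packs the pages of one epoch entry into as few bios as possible: whenever bio_add_page() refuses because local queue limits differ from the peer's, it opens another bio via the next_bio label and keeps going, and e->pending_bios counts the in-flight pieces. A userspace sketch of just that packing loop (fake_bio and its two-page limit are made up, not the block layer API):

/* illustrative only: the "open another bio when the current one is full"
 * loop of drbd_submit_ee(); fake_bio and its limit are made up */
#include <stdio.h>

#define FAKE_BIO_MAX_PAGES 2

struct fake_bio {
	int vcnt;			/* pages added so far */
};

static int fake_bio_add_page(struct fake_bio *bio)
{
	if (bio->vcnt >= FAKE_BIO_MAX_PAGES)
		return 0;		/* refuse: caller must open a new bio */
	bio->vcnt++;
	return 1;
}

int main(void)
{
	struct fake_bio bios[8] = { { 0 } };
	int n_bios = 1, nr_pages = 5, i;

	for (i = 0; i < nr_pages; i++) {
		/* like "goto next_bio" above: on refusal, start a fresh bio */
		while (!fake_bio_add_page(&bios[n_bios - 1]))
			n_bios++;
	}
	printf("%d pages packed into %d bios\n", nr_pages, n_bios);
	return 0;
}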
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index de81ab7..3397f11 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -722,6 +722,7 @@
 	struct drbd_request *req;
 	int local, remote;
 	int err = -EIO;
+	int ret = 0;
 
 	/* allocate outside of all locks; */
 	req = drbd_req_new(mdev, bio);
@@ -784,7 +785,7 @@
 			    (mdev->state.pdsk == D_INCONSISTENT &&
 			     mdev->state.conn >= C_CONNECTED));
 
-	if (!(local || remote)) {
+	if (!(local || remote) && !mdev->state.susp) {
 		dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
 		goto fail_free_complete;
 	}
@@ -810,6 +811,16 @@
 	/* GOOD, everything prepared, grab the spin_lock */
 	spin_lock_irq(&mdev->req_lock);
 
+	if (mdev->state.susp) {
+		/* If we got suspended, use the retry mechanism of
+		   generic_make_request() to restart processing of this
+		   bio. In the next call to drbd_make_request_26
+		   we sleep in inc_ap_bio() */
+		ret = 1;
+		spin_unlock_irq(&mdev->req_lock);
+		goto fail_free_complete;
+	}
+
 	if (remote) {
 		remote = (mdev->state.pdsk == D_UP_TO_DATE ||
 			    (mdev->state.pdsk == D_INCONSISTENT &&
@@ -947,12 +958,14 @@
 		req->private_bio = NULL;
 		put_ldev(mdev);
 	}
-	bio_endio(bio, err);
+	if (!ret)
+		bio_endio(bio, err);
+
 	drbd_req_free(req);
 	dec_ap_bio(mdev);
 	kfree(b);
 
-	return 0;
+	return ret;
 }
 
 /* helper function for drbd_make_request
@@ -962,11 +975,6 @@
  */
 static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
 {
-	/* Unconfigured */
-	if (mdev->state.conn == C_DISCONNECTING &&
-	    mdev->state.disk == D_DISKLESS)
-		return 1;
-
 	if (mdev->state.role != R_PRIMARY &&
 		(!allow_oos || is_write)) {
 		if (__ratelimit(&drbd_ratelimit_state)) {
@@ -1070,15 +1078,21 @@
 
 		/* we need to get a "reference count" (ap_bio_cnt)
 		 * to avoid races with the disconnect/reconnect/suspend code.
-		 * In case we need to split the bio here, we need to get two references
+		 * In case we need to split the bio here, we need to get three references
 		 * atomically, otherwise we might deadlock when trying to submit the
 		 * second one! */
-		inc_ap_bio(mdev, 2);
+		inc_ap_bio(mdev, 3);
 
 		D_ASSERT(e_enr == s_enr + 1);
 
-		drbd_make_request_common(mdev, &bp->bio1);
-		drbd_make_request_common(mdev, &bp->bio2);
+		while (drbd_make_request_common(mdev, &bp->bio1))
+			inc_ap_bio(mdev, 1);
+
+		while (drbd_make_request_common(mdev, &bp->bio2))
+			inc_ap_bio(mdev, 1);
+
+		dec_ap_bio(mdev);
+
 		bio_pair_release(bp);
 	}
 	return 0;
@@ -1115,7 +1129,7 @@
 	} else if (limit && get_ldev(mdev)) {
 		struct request_queue * const b =
 			mdev->ldev->backing_bdev->bd_disk->queue;
-		if (b->merge_bvec_fn && mdev->ldev->dc.use_bmbv) {
+		if (b->merge_bvec_fn) {
 			backing_limit = b->merge_bvec_fn(b, bvm, bvec);
 			limit = min(limit, backing_limit);
 		}
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index 76863e3..85179e1 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -70,7 +70,7 @@
 
 static const char *drbd_state_sw_errors[] = {
 	[-SS_TWO_PRIMARIES] = "Multiple primaries not allowed by config",
-	[-SS_NO_UP_TO_DATE_DISK] = "Refusing to be Primary without at least one UpToDate disk",
+	[-SS_NO_UP_TO_DATE_DISK] = "Need access to UpToDate data",
 	[-SS_NO_LOCAL_DISK] = "Can not resync without local disk",
 	[-SS_NO_REMOTE_DISK] = "Can not resync without remote disk",
 	[-SS_CONNECTED_OUTDATES] = "Refusing to be Outdated while Connected",
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index d48a1df..727ff63 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -47,8 +47,7 @@
 
 /* defined here:
    drbd_md_io_complete
-   drbd_endio_write_sec
-   drbd_endio_read_sec
+   drbd_endio_sec
    drbd_endio_pri
 
  * more endio handlers:
@@ -85,27 +84,10 @@
 /* reads on behalf of the partner,
  * "submitted" by the receiver
  */
-void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
+void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_epoch_entry *e = NULL;
-	struct drbd_conf *mdev;
-	int uptodate = bio_flagged(bio, BIO_UPTODATE);
-
-	e = bio->bi_private;
-	mdev = e->mdev;
-
-	if (error)
-		dev_warn(DEV, "read: error=%d s=%llus\n", error,
-				(unsigned long long)e->sector);
-	if (!error && !uptodate) {
-		dev_warn(DEV, "read: setting error to -EIO s=%llus\n",
-				(unsigned long long)e->sector);
-		/* strange behavior of some lower level drivers...
-		 * fail the request by clearing the uptodate flag,
-		 * but do not return any error?! */
-		error = -EIO;
-	}
+	struct drbd_conf *mdev = e->mdev;
 
 	D_ASSERT(e->block_id != ID_VACANT);
 
@@ -114,49 +96,38 @@
 	list_del(&e->w.list);
 	if (list_empty(&mdev->read_ee))
 		wake_up(&mdev->ee_wait);
+	if (test_bit(__EE_WAS_ERROR, &e->flags))
+		__drbd_chk_io_error(mdev, FALSE);
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
-	drbd_chk_io_error(mdev, error, FALSE);
 	drbd_queue_work(&mdev->data.work, &e->w);
 	put_ldev(mdev);
 }
 
+static int is_failed_barrier(int ee_flags)
+{
+	return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
+			== (EE_IS_BARRIER|EE_WAS_ERROR);
+}
+
 /* writes on behalf of the partner, or resync writes,
- * "submitted" by the receiver.
- */
-void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
+ * "submitted" by the receiver, final stage.  */
+static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_epoch_entry *e = NULL;
-	struct drbd_conf *mdev;
+	struct drbd_conf *mdev = e->mdev;
 	sector_t e_sector;
 	int do_wake;
 	int is_syncer_req;
 	int do_al_complete_io;
-	int uptodate = bio_flagged(bio, BIO_UPTODATE);
-	int is_barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
 
-	e = bio->bi_private;
-	mdev = e->mdev;
-
-	if (error)
-		dev_warn(DEV, "write: error=%d s=%llus\n", error,
-				(unsigned long long)e->sector);
-	if (!error && !uptodate) {
-		dev_warn(DEV, "write: setting error to -EIO s=%llus\n",
-				(unsigned long long)e->sector);
-		/* strange behavior of some lower level drivers...
-		 * fail the request by clearing the uptodate flag,
-		 * but do not return any error?! */
-		error = -EIO;
-	}
-
-	/* error == -ENOTSUPP would be a better test,
-	 * alas it is not reliable */
-	if (error && is_barrier && e->flags & EE_IS_BARRIER) {
+	/* if this is a failed barrier request, disable use of barriers,
+	 * and schedule for resubmission */
+	if (is_failed_barrier(e->flags)) {
 		drbd_bump_write_ordering(mdev, WO_bdev_flush);
 		spin_lock_irqsave(&mdev->req_lock, flags);
 		list_del(&e->w.list);
+		e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
 		e->w.cb = w_e_reissue;
 		/* put_ldev actually happens below, once we come here again. */
 		__release(local);
@@ -167,17 +138,16 @@
 
 	D_ASSERT(e->block_id != ID_VACANT);
 
-	spin_lock_irqsave(&mdev->req_lock, flags);
-	mdev->writ_cnt += e->size >> 9;
-	is_syncer_req = is_syncer_block_id(e->block_id);
-
 	/* after we moved e to done_ee,
 	 * we may no longer access it,
 	 * it may be freed/reused already!
 	 * (as soon as we release the req_lock) */
 	e_sector = e->sector;
 	do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
+	is_syncer_req = is_syncer_block_id(e->block_id);
 
+	spin_lock_irqsave(&mdev->req_lock, flags);
+	mdev->writ_cnt += e->size >> 9;
 	list_del(&e->w.list); /* has been on active_ee or sync_ee */
 	list_add_tail(&e->w.list, &mdev->done_ee);
 
@@ -190,7 +160,7 @@
 		? list_empty(&mdev->sync_ee)
 		: list_empty(&mdev->active_ee);
 
-	if (error)
+	if (test_bit(__EE_WAS_ERROR, &e->flags))
 		__drbd_chk_io_error(mdev, FALSE);
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
@@ -205,7 +175,42 @@
 
 	wake_asender(mdev);
 	put_ldev(mdev);
+}
 
+/* writes on behalf of the partner, or resync writes,
+ * "submitted" by the receiver.
+ */
+void drbd_endio_sec(struct bio *bio, int error)
+{
+	struct drbd_epoch_entry *e = bio->bi_private;
+	struct drbd_conf *mdev = e->mdev;
+	int uptodate = bio_flagged(bio, BIO_UPTODATE);
+	int is_write = bio_data_dir(bio) == WRITE;
+
+	if (error)
+		dev_warn(DEV, "%s: error=%d s=%llus\n",
+				is_write ? "write" : "read", error,
+				(unsigned long long)e->sector);
+	if (!error && !uptodate) {
+		dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
+				is_write ? "write" : "read",
+				(unsigned long long)e->sector);
+		/* strange behavior of some lower level drivers...
+		 * fail the request by clearing the uptodate flag,
+		 * but do not return any error?! */
+		error = -EIO;
+	}
+
+	if (error)
+		set_bit(__EE_WAS_ERROR, &e->flags);
+
+	bio_put(bio); /* no need for the bio anymore */
+	if (atomic_dec_and_test(&e->pending_bios)) {
+		if (is_write)
+			drbd_endio_write_sec_final(e);
+		else
+			drbd_endio_read_sec_final(e);
+	}
 }
 
 /* read, readA or write requests on R_PRIMARY coming from drbd_make_request
@@ -295,7 +300,34 @@
 	return 1; /* Simply ignore this! */
 }
 
-void drbd_csum(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
+void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
+{
+	struct hash_desc desc;
+	struct scatterlist sg;
+	struct page *page = e->pages;
+	struct page *tmp;
+	unsigned len;
+
+	desc.tfm = tfm;
+	desc.flags = 0;
+
+	sg_init_table(&sg, 1);
+	crypto_hash_init(&desc);
+
+	while ((tmp = page_chain_next(page))) {
+		/* all but the last page will be fully used */
+		sg_set_page(&sg, page, PAGE_SIZE, 0);
+		crypto_hash_update(&desc, &sg, sg.length);
+		page = tmp;
+	}
+	/* and now the last, possibly only partially used page */
+	len = e->size & (PAGE_SIZE - 1);
+	sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
+	crypto_hash_update(&desc, &sg, sg.length);
+	crypto_hash_final(&desc, digest);
+}
+
+void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
 {
 	struct hash_desc desc;
 	struct scatterlist sg;
@@ -329,11 +361,11 @@
 		return 1;
 	}
 
-	if (likely(drbd_bio_uptodate(e->private_bio))) {
+	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
 		digest_size = crypto_hash_digestsize(mdev->csums_tfm);
 		digest = kmalloc(digest_size, GFP_NOIO);
 		if (digest) {
-			drbd_csum(mdev, mdev->csums_tfm, e->private_bio, digest);
+			drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
 
 			inc_rs_pending(mdev);
 			ok = drbd_send_drequest_csum(mdev,
@@ -369,23 +401,21 @@
 	/* GFP_TRY, because if there is no memory available right now, this may
 	 * be rescheduled for later. It is "only" background resync, after all. */
 	e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
-	if (!e) {
-		put_ldev(mdev);
-		return 2;
-	}
+	if (!e)
+		goto fail;
 
 	spin_lock_irq(&mdev->req_lock);
 	list_add(&e->w.list, &mdev->read_ee);
 	spin_unlock_irq(&mdev->req_lock);
 
-	e->private_bio->bi_end_io = drbd_endio_read_sec;
-	e->private_bio->bi_rw = READ;
 	e->w.cb = w_e_send_csum;
+	if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
+		return 1;
 
-	mdev->read_cnt += size >> 9;
-	drbd_generic_make_request(mdev, DRBD_FAULT_RS_RD, e->private_bio);
-
-	return 1;
+	drbd_free_ee(mdev, e);
+fail:
+	put_ldev(mdev);
+	return 2;
 }
 
 void resync_timer_fn(unsigned long data)
@@ -414,13 +444,25 @@
 		drbd_queue_work(&mdev->data.work, &mdev->resync_work);
 }
 
+static int calc_resync_rate(struct drbd_conf *mdev)
+{
+	int d = mdev->data_delay / 1000; /* us -> ms */
+	int td = mdev->sync_conf.throttle_th * 100;  /* 0.1s -> ms */
+	int hd = mdev->sync_conf.hold_off_th * 100;  /* 0.1s -> ms */
+	int cr = mdev->sync_conf.rate;
+
+	return d <= td ? cr :
+		d >= hd ? 0 :
+		cr + (cr * (td - d) / (hd - td));
+}
+
 int w_make_resync_request(struct drbd_conf *mdev,
 		struct drbd_work *w, int cancel)
 {
 	unsigned long bit;
 	sector_t sector;
 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
-	int max_segment_size = queue_max_segment_size(mdev->rq_queue);
+	int max_segment_size;
 	int number, i, size, pe, mx;
 	int align, queued, sndbuf;
 
@@ -446,7 +488,13 @@
 		return 1;
 	}
 
-	number = SLEEP_TIME * mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
+	/* starting with drbd 8.3.8, we can handle multi-bio EEs,
+	 * if it should be necessary */
+	max_segment_size = mdev->agreed_pro_version < 94 ?
+		queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE;
+
+	mdev->c_sync_rate = calc_resync_rate(mdev);
+	number = SLEEP_TIME * mdev->c_sync_rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
 	pe = atomic_read(&mdev->rs_pending_cnt);
 
 	mutex_lock(&mdev->data.mutex);
@@ -509,12 +557,6 @@
 		 *
 		 * Additionally always align bigger requests, in order to
 		 * be prepared for all stripe sizes of software RAIDs.
-		 *
-		 * we _do_ care about the agreed-upon q->max_segment_size
-		 * here, as splitting up the requests on the other side is more
-		 * difficult.  the consequence is, that on lvm and md and other
-		 * "indirect" devices, this is dead code, since
-		 * q->max_segment_size will be PAGE_SIZE.
 		 */
 		align = 1;
 		for (;;) {
@@ -806,7 +848,7 @@
 /* helper */
 static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
 {
-	if (drbd_bio_has_active_page(e->private_bio)) {
+	if (drbd_ee_has_active_page(e)) {
 		/* This might happen if sendpage() has not finished */
 		spin_lock_irq(&mdev->req_lock);
 		list_add_tail(&e->w.list, &mdev->net_ee);
@@ -832,7 +874,7 @@
 		return 1;
 	}
 
-	if (likely(drbd_bio_uptodate(e->private_bio))) {
+	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
 		ok = drbd_send_block(mdev, P_DATA_REPLY, e);
 	} else {
 		if (__ratelimit(&drbd_ratelimit_state))
@@ -873,7 +915,7 @@
 		put_ldev(mdev);
 	}
 
-	if (likely(drbd_bio_uptodate(e->private_bio))) {
+	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
 		if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
 			inc_rs_pending(mdev);
 			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
@@ -921,7 +963,7 @@
 
 	di = (struct digest_info *)(unsigned long)e->block_id;
 
-	if (likely(drbd_bio_uptodate(e->private_bio))) {
+	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
 		/* quick hack to try to avoid a race against reconfiguration.
 		 * a real fix would be much more involved,
 		 * introducing more locking mechanisms */
@@ -931,7 +973,7 @@
 			digest = kmalloc(digest_size, GFP_NOIO);
 		}
 		if (digest) {
-			drbd_csum(mdev, mdev->csums_tfm, e->private_bio, digest);
+			drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
 			eq = !memcmp(digest, di->digest, digest_size);
 			kfree(digest);
 		}
@@ -973,14 +1015,14 @@
 	if (unlikely(cancel))
 		goto out;
 
-	if (unlikely(!drbd_bio_uptodate(e->private_bio)))
+	if (unlikely((e->flags & EE_WAS_ERROR) != 0))
 		goto out;
 
 	digest_size = crypto_hash_digestsize(mdev->verify_tfm);
 	/* FIXME if this allocation fails, online verify will not terminate! */
 	digest = kmalloc(digest_size, GFP_NOIO);
 	if (digest) {
-		drbd_csum(mdev, mdev->verify_tfm, e->private_bio, digest);
+		drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
 		inc_rs_pending(mdev);
 		ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
 					     digest, digest_size, P_OV_REPLY);
@@ -1029,11 +1071,11 @@
 
 	di = (struct digest_info *)(unsigned long)e->block_id;
 
-	if (likely(drbd_bio_uptodate(e->private_bio))) {
+	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
 		digest_size = crypto_hash_digestsize(mdev->verify_tfm);
 		digest = kmalloc(digest_size, GFP_NOIO);
 		if (digest) {
-			drbd_csum(mdev, mdev->verify_tfm, e->private_bio, digest);
+			drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
 
 			D_ASSERT(digest_size == di->digest_size);
 			eq = !memcmp(digest, di->digest, digest_size);
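The calc_resync_rate() helper added to drbd_worker.c above interpolates linearly between the configured resync rate (while the measured data delay is at or below the throttle threshold) and zero (once it reaches the hold-off threshold). A minimal stand-alone sketch of that arithmetic follows; the sample thresholds and rate are illustrative assumptions, not values from the patch.

	/*
	 * Stand-alone sketch of the calc_resync_rate() interpolation;
	 * units and sample numbers are assumptions for illustration.
	 */
	#include <stdio.h>

	static int calc_rate(int d /* delay, ms */, int td /* throttle th, ms */,
			     int hd /* hold-off th, ms */, int cr /* configured rate */)
	{
		if (d <= td)		/* at or below throttle threshold: full rate */
			return cr;
		if (d >= hd)		/* at or above hold-off threshold: stop resync */
			return 0;
		/* linear ramp from cr (at d == td) down to 0 (at d == hd) */
		return cr + (cr * (td - d) / (hd - td));
	}

	int main(void)
	{
		printf("%d\n", calc_rate(100, 200, 1000, 10000));	/* -> 10000 */
		printf("%d\n", calc_rate(600, 200, 1000, 10000));	/* -> 5000, halfway */
		printf("%d\n", calc_rate(1500, 200, 1000, 10000));	/* -> 0 */
		return 0;
	}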
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index f93fa11..defdb50 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -18,23 +18,9 @@
 
 #define drbd_bio_uptodate(bio) bio_flagged(bio, BIO_UPTODATE)
 
-static inline int drbd_bio_has_active_page(struct bio *bio)
-{
-	struct bio_vec *bvec;
-	int i;
-
-	__bio_for_each_segment(bvec, bio, i, 0) {
-		if (page_count(bvec->bv_page) > 1)
-			return 1;
-	}
-
-	return 0;
-}
-
 /* bi_end_io handlers */
 extern void drbd_md_io_complete(struct bio *bio, int error);
-extern void drbd_endio_read_sec(struct bio *bio, int error);
-extern void drbd_endio_write_sec(struct bio *bio, int error);
+extern void drbd_endio_sec(struct bio *bio, int error);
 extern void drbd_endio_pri(struct bio *bio, int error);
 
 /*
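The wrappers change above folds the read and write secondary endio handlers into one drbd_endio_sec(), which defers the real completion to whichever bio drops e->pending_bios to zero. A user-space analogue of that "last completion wins" idiom, using C11 atomics, is sketched below; all names in it are made up for the illustration.

	#include <stdatomic.h>
	#include <stdio.h>

	struct request {
		atomic_int pending;	/* sub-I/Os still in flight */
	};

	/* called once per completed sub-I/O, possibly from different threads */
	static void sub_io_done(struct request *req)
	{
		/* atomic_fetch_sub returns the previous value */
		if (atomic_fetch_sub(&req->pending, 1) == 1)
			printf("last sub-I/O done, completing request\n");
	}

	int main(void)
	{
		struct request req;

		atomic_init(&req.pending, 3);
		sub_io_done(&req);
		sub_io_done(&req);
		sub_io_done(&req);	/* only this call completes the request */
		return 0;
	}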
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 8546d12..a90e83c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -835,6 +835,8 @@
 
 	set_capacity(lo->lo_disk, size);
 	bd_set_size(bdev, size << 9);
+	/* let user-space know about the new size */
+	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
 
 	set_blocksize(bdev, lo_blocksize);
 
@@ -858,6 +860,7 @@
 	set_capacity(lo->lo_disk, 0);
 	invalidate_bdev(bdev);
 	bd_set_size(bdev, 0);
+	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
 	mapping_set_gfp_mask(mapping, lo->old_gfp_mask);
 	lo->lo_state = Lo_unbound;
  out_putf:
@@ -944,8 +947,11 @@
 	if (bdev)
 		invalidate_bdev(bdev);
 	set_capacity(lo->lo_disk, 0);
-	if (bdev)
+	if (bdev) {
 		bd_set_size(bdev, 0);
+		/* let user-space know about this change */
+		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
+	}
 	mapping_set_gfp_mask(filp->f_mapping, gfp);
 	lo->lo_state = Lo_unbound;
 	/* This is safe: open() is still holding a reference. */
@@ -1189,6 +1195,8 @@
 	sz <<= 9;
 	mutex_lock(&bdev->bd_mutex);
 	bd_set_size(bdev, sz);
+	/* let user-space know about the new size */
+	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
 	mutex_unlock(&bdev->bd_mutex);
 
  out:
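The loop.c hunks above add KOBJ_CHANGE uevents whenever the device capacity changes, so user-space can notice a resize without polling. As a rough sketch, such events can be observed on the kernel uevent netlink socket; the program below (an assumption about how a consumer might watch for them, not part of the patch) simply dumps the raw "ACTION@devpath" strings without filtering for loop devices.

	#include <linux/netlink.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <sys/types.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_nl addr;
		char buf[4096];
		ssize_t len;
		int fd;

		fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
		if (fd < 0) {
			perror("socket");
			return 1;
		}

		memset(&addr, 0, sizeof(addr));
		addr.nl_family = AF_NETLINK;
		addr.nl_groups = 1;	/* kernel uevent multicast group */

		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
			perror("bind");
			return 1;
		}

		while ((len = recv(fd, buf, sizeof(buf) - 1, 0)) > 0) {
			buf[len] = '\0';
			/* first NUL-terminated string is "ACTION@devpath" */
			printf("%s\n", buf);
		}
		return 0;
	}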
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3141dd3..e21175b 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -276,11 +276,19 @@
 	  Allows synchronous HDLC communications with tty device drivers that
 	  support synchronous HDLC such as the Microgate SyncLink adapter.
 
-	  This driver can only be built as a module ( = code which can be
+	  This driver can be built as a module ( = code which can be
 	  inserted in and removed from the running kernel whenever you want).
 	  The module will be called n_hdlc. If you want to do that, say M
 	  here.
 
+config N_GSM
+	tristate "GSM MUX line discipline support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on NET
+	help
+	  This line discipline provides support for the GSM MUX protocol and
+	  presents the mux as a set of 61 individual tty devices.
+
 config RISCOM8
 	tristate "SDL RISCom/8 card support"
 	depends on SERIAL_NONSTANDARD
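The new N_GSM entry above builds the GSM 07.10 mux as a tty line discipline, so user-space activates it on an existing serial port rather than opening a dedicated device. A hedged sketch of that activation follows; it assumes the N_GSM0710 discipline number exported through <linux/tty.h> by this series, and "/dev/ttyS0" is only a placeholder path.

	#include <fcntl.h>
	#include <linux/tty.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int ldisc = N_GSM0710;
		int fd;

		fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* switch the tty to the GSM 07.10 mux line discipline */
		if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
			perror("TIOCSETD");
			close(fd);
			return 1;
		}

		/* the mux now exposes its channels as separate tty devices */
		pause();
		close(fd);
		return 0;
	}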
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index f957edf..d39be4c 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -40,6 +40,7 @@
 obj-$(CONFIG_SYNCLINKMP)	+= synclinkmp.o
 obj-$(CONFIG_SYNCLINK_GT)	+= synclink_gt.o
 obj-$(CONFIG_N_HDLC)		+= n_hdlc.o
+obj-$(CONFIG_N_GSM)		+= n_gsm.o
 obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_SX)		+= sx.o generic_serial.o
 obj-$(CONFIG_RIO)		+= rio/ generic_serial.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 870f12c..1204909 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -178,86 +178,6 @@
 #define PGE_EMPTY(b, p)	(!(p) || (p) == (unsigned long) (b)->scratch_page)
 
 
-/* Intel registers */
-#define INTEL_APSIZE	0xb4
-#define INTEL_ATTBASE	0xb8
-#define INTEL_AGPCTRL	0xb0
-#define INTEL_NBXCFG	0x50
-#define INTEL_ERRSTS	0x91
-
-/* Intel i830 registers */
-#define I830_GMCH_CTRL			0x52
-#define I830_GMCH_ENABLED		0x4
-#define I830_GMCH_MEM_MASK		0x1
-#define I830_GMCH_MEM_64M		0x1
-#define I830_GMCH_MEM_128M		0
-#define I830_GMCH_GMS_MASK		0x70
-#define I830_GMCH_GMS_DISABLED		0x00
-#define I830_GMCH_GMS_LOCAL		0x10
-#define I830_GMCH_GMS_STOLEN_512	0x20
-#define I830_GMCH_GMS_STOLEN_1024	0x30
-#define I830_GMCH_GMS_STOLEN_8192	0x40
-#define I830_RDRAM_CHANNEL_TYPE		0x03010
-#define I830_RDRAM_ND(x)		(((x) & 0x20) >> 5)
-#define I830_RDRAM_DDT(x)		(((x) & 0x18) >> 3)
-
-/* This one is for I830MP w. an external graphic card */
-#define INTEL_I830_ERRSTS	0x92
-
-/* Intel 855GM/852GM registers */
-#define I855_GMCH_GMS_MASK		0xF0
-#define I855_GMCH_GMS_STOLEN_0M		0x0
-#define I855_GMCH_GMS_STOLEN_1M		(0x1 << 4)
-#define I855_GMCH_GMS_STOLEN_4M		(0x2 << 4)
-#define I855_GMCH_GMS_STOLEN_8M		(0x3 << 4)
-#define I855_GMCH_GMS_STOLEN_16M	(0x4 << 4)
-#define I855_GMCH_GMS_STOLEN_32M	(0x5 << 4)
-#define I85X_CAPID			0x44
-#define I85X_VARIANT_MASK		0x7
-#define I85X_VARIANT_SHIFT		5
-#define I855_GME			0x0
-#define I855_GM				0x4
-#define I852_GME			0x2
-#define I852_GM				0x5
-
-/* Intel i845 registers */
-#define INTEL_I845_AGPM		0x51
-#define INTEL_I845_ERRSTS	0xc8
-
-/* Intel i860 registers */
-#define INTEL_I860_MCHCFG	0x50
-#define INTEL_I860_ERRSTS	0xc8
-
-/* Intel i810 registers */
-#define I810_GMADDR		0x10
-#define I810_MMADDR		0x14
-#define I810_PTE_BASE		0x10000
-#define I810_PTE_MAIN_UNCACHED	0x00000000
-#define I810_PTE_LOCAL		0x00000002
-#define I810_PTE_VALID		0x00000001
-#define I830_PTE_SYSTEM_CACHED  0x00000006
-#define I810_SMRAM_MISCC	0x70
-#define I810_GFX_MEM_WIN_SIZE	0x00010000
-#define I810_GFX_MEM_WIN_32M	0x00010000
-#define I810_GMS		0x000000c0
-#define I810_GMS_DISABLE	0x00000000
-#define I810_PGETBL_CTL		0x2020
-#define I810_PGETBL_ENABLED	0x00000001
-#define I965_PGETBL_SIZE_MASK	0x0000000e
-#define I965_PGETBL_SIZE_512KB	(0 << 1)
-#define I965_PGETBL_SIZE_256KB	(1 << 1)
-#define I965_PGETBL_SIZE_128KB	(2 << 1)
-#define I965_PGETBL_SIZE_1MB	(3 << 1)
-#define I965_PGETBL_SIZE_2MB	(4 << 1)
-#define I965_PGETBL_SIZE_1_5MB	(5 << 1)
-#define G33_PGETBL_SIZE_MASK    (3 << 8)
-#define G33_PGETBL_SIZE_1M      (1 << 8)
-#define G33_PGETBL_SIZE_2M      (2 << 8)
-
-#define I810_DRAM_CTL		0x3000
-#define I810_DRAM_ROW_0		0x00000001
-#define I810_DRAM_ROW_0_SDRAM	0x00000001
-
 struct agp_device_ids {
 	unsigned short device_id; /* first, to make table easier to read */
 	enum chipset_type chipset;
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index d2ce68f..fd79351 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -204,6 +204,7 @@
 	.aperture_sizes		= ali_generic_sizes,
 	.size_type		= U32_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= ali_configure,
 	.fetch_size		= ali_fetch_size,
 	.cleanup		= ali_cleanup,
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index a7637d7..b6b1568 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -142,6 +142,7 @@
 {
 	struct aper_size_info_lvl2 *value;
 	struct amd_page_map page_dir;
+	unsigned long __iomem *cur_gatt;
 	unsigned long addr;
 	int retval;
 	u32 temp;
@@ -178,6 +179,13 @@
 		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
 	}
 
+	for (i = 0; i < value->num_entries; i++) {
+		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
+	}
+
 	return 0;
 }
 
@@ -375,6 +383,7 @@
 	.aperture_sizes		= amd_irongate_sizes,
 	.size_type		= LVL2_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= amd_irongate_configure,
 	.fetch_size		= amd_irongate_fetch_size,
 	.cleanup		= amd_irongate_cleanup,
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index fd50ead..67ea3a6 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -210,6 +210,7 @@
 	.aperture_sizes		= amd_8151_sizes,
 	.size_type		= U32_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= amd_8151_configure,
 	.fetch_size		= amd64_fetch_size,
 	.cleanup		= amd64_cleanup,
@@ -499,6 +500,10 @@
 	u8 cap_ptr;
 	int err;
 
+	/* The Highlander principle */
+	if (agp_bridges_found)
+		return -ENODEV;
+
 	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
 	if (!cap_ptr)
 		return -ENODEV;
@@ -562,6 +567,8 @@
 			   amd64_aperture_sizes[bridge->aperture_size_idx].size);
 	agp_remove_bridge(bridge);
 	agp_put_bridge(bridge);
+
+	agp_bridges_found--;
 }
 
 #ifdef CONFIG_PM
@@ -709,6 +716,11 @@
 
 MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
 
+static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
+	{ PCI_DEVICE_CLASS(0, 0) },
+	{ }
+};
+
 static struct pci_driver agp_amd64_pci_driver = {
 	.name		= "agpgart-amd64",
 	.id_table	= agp_amd64_pci_table,
@@ -734,7 +746,6 @@
 		return err;
 
 	if (agp_bridges_found == 0) {
-		struct pci_dev *dev;
 		if (!agp_try_unsupported && !agp_try_unsupported_boot) {
 			printk(KERN_INFO PFX "No supported AGP bridge found.\n");
 #ifdef MODULE
@@ -750,17 +761,10 @@
 			return -ENODEV;
 
 		/* Look for any AGP bridge */
-		dev = NULL;
-		err = -ENODEV;
-		for_each_pci_dev(dev) {
-			if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
-				continue;
-			/* Only one bridge supported right now */
-			if (agp_amd64_probe(dev, NULL) == 0) {
-				err = 0;
-				break;
-			}
-		}
+		agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
+		err = driver_attach(&agp_amd64_pci_driver.driver);
+		if (err == 0 && agp_bridges_found == 0)
+			err = -ENODEV;
 	}
 	return err;
 }
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 3b2ecbe..dc30e22 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -341,6 +341,7 @@
 {
 	struct aper_size_info_lvl2 *value;
 	struct ati_page_map page_dir;
+	unsigned long __iomem *cur_gatt;
 	unsigned long addr;
 	int retval;
 	u32 temp;
@@ -395,6 +396,12 @@
 		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
 	}
 
+	for (i = 0; i < value->num_entries; i++) {
+		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+	}
+
 	return 0;
 }
 
@@ -415,6 +422,7 @@
 	.aperture_sizes		= ati_generic_sizes,
 	.size_type		= LVL2_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= ati_configure,
 	.fetch_size		= ati_fetch_size,
 	.cleanup		= ati_cleanup,
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 793f39e..aa109cb 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -28,6 +28,7 @@
 #include <linux/page-flags.h>
 #include <linux/mm.h>
 #include "agp.h"
+#include "intel-agp.h"
 
 /*
  * The real differences to the generic AGP code is
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index aa4248e..d836a71 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -11,1531 +11,13 @@
 #include <linux/agp_backend.h>
 #include <asm/smp.h>
 #include "agp.h"
+#include "intel-agp.h"
+
+#include "intel-gtt.c"
 
 int intel_agp_enabled;
 EXPORT_SYMBOL(intel_agp_enabled);
 
-/*
- * If we have Intel graphics, we're not going to have anything other than
- * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
- * on the Intel IOMMU support (CONFIG_DMAR).
- * Only newer chipsets need to bother with this, of course.
- */
-#ifdef CONFIG_DMAR
-#define USE_PCI_DMA_API 1
-#endif
-
-#define PCI_DEVICE_ID_INTEL_E7221_HB	0x2588
-#define PCI_DEVICE_ID_INTEL_E7221_IG	0x258a
-#define PCI_DEVICE_ID_INTEL_82946GZ_HB      0x2970
-#define PCI_DEVICE_ID_INTEL_82946GZ_IG      0x2972
-#define PCI_DEVICE_ID_INTEL_82G35_HB     0x2980
-#define PCI_DEVICE_ID_INTEL_82G35_IG     0x2982
-#define PCI_DEVICE_ID_INTEL_82965Q_HB       0x2990
-#define PCI_DEVICE_ID_INTEL_82965Q_IG       0x2992
-#define PCI_DEVICE_ID_INTEL_82965G_HB       0x29A0
-#define PCI_DEVICE_ID_INTEL_82965G_IG       0x29A2
-#define PCI_DEVICE_ID_INTEL_82965GM_HB      0x2A00
-#define PCI_DEVICE_ID_INTEL_82965GM_IG      0x2A02
-#define PCI_DEVICE_ID_INTEL_82965GME_HB     0x2A10
-#define PCI_DEVICE_ID_INTEL_82965GME_IG     0x2A12
-#define PCI_DEVICE_ID_INTEL_82945GME_HB     0x27AC
-#define PCI_DEVICE_ID_INTEL_82945GME_IG     0x27AE
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB        0xA010
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG        0xA011
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB         0xA000
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG         0xA001
-#define PCI_DEVICE_ID_INTEL_G33_HB          0x29C0
-#define PCI_DEVICE_ID_INTEL_G33_IG          0x29C2
-#define PCI_DEVICE_ID_INTEL_Q35_HB          0x29B0
-#define PCI_DEVICE_ID_INTEL_Q35_IG          0x29B2
-#define PCI_DEVICE_ID_INTEL_Q33_HB          0x29D0
-#define PCI_DEVICE_ID_INTEL_Q33_IG          0x29D2
-#define PCI_DEVICE_ID_INTEL_B43_HB          0x2E40
-#define PCI_DEVICE_ID_INTEL_B43_IG          0x2E42
-#define PCI_DEVICE_ID_INTEL_GM45_HB         0x2A40
-#define PCI_DEVICE_ID_INTEL_GM45_IG         0x2A42
-#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB        0x2E00
-#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG        0x2E02
-#define PCI_DEVICE_ID_INTEL_Q45_HB          0x2E10
-#define PCI_DEVICE_ID_INTEL_Q45_IG          0x2E12
-#define PCI_DEVICE_ID_INTEL_G45_HB          0x2E20
-#define PCI_DEVICE_ID_INTEL_G45_IG          0x2E22
-#define PCI_DEVICE_ID_INTEL_G41_HB          0x2E30
-#define PCI_DEVICE_ID_INTEL_G41_IG          0x2E32
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB	    0x0040
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG	    0x0042
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB	    0x0044
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB	    0x0062
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB    0x006a
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG	    0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB  0x0100
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG  0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB  0x0104
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG  0x0106
-
-/* cover 915 and 945 variants */
-#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
-
-#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
-
-#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
-
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
-		IS_SNB)
-
-extern int agp_memory_reserved;
-
-
-/* Intel 815 register */
-#define INTEL_815_APCONT	0x51
-#define INTEL_815_ATTBASE_MASK	~0x1FFFFFFF
-
-/* Intel i820 registers */
-#define INTEL_I820_RDCR		0x51
-#define INTEL_I820_ERRSTS	0xc8
-
-/* Intel i840 registers */
-#define INTEL_I840_MCHCFG	0x50
-#define INTEL_I840_ERRSTS	0xc8
-
-/* Intel i850 registers */
-#define INTEL_I850_MCHCFG	0x50
-#define INTEL_I850_ERRSTS	0xc8
-
-/* intel 915G registers */
-#define I915_GMADDR	0x18
-#define I915_MMADDR	0x10
-#define I915_PTEADDR	0x1C
-#define I915_GMCH_GMS_STOLEN_48M	(0x6 << 4)
-#define I915_GMCH_GMS_STOLEN_64M	(0x7 << 4)
-#define G33_GMCH_GMS_STOLEN_128M	(0x8 << 4)
-#define G33_GMCH_GMS_STOLEN_256M	(0x9 << 4)
-#define INTEL_GMCH_GMS_STOLEN_96M	(0xa << 4)
-#define INTEL_GMCH_GMS_STOLEN_160M	(0xb << 4)
-#define INTEL_GMCH_GMS_STOLEN_224M	(0xc << 4)
-#define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
-
-#define I915_IFPADDR    0x60
-
-/* Intel 965G registers */
-#define I965_MSAC 0x62
-#define I965_IFPADDR    0x70
-
-/* Intel 7505 registers */
-#define INTEL_I7505_APSIZE	0x74
-#define INTEL_I7505_NCAPID	0x60
-#define INTEL_I7505_NISTAT	0x6c
-#define INTEL_I7505_ATTBASE	0x78
-#define INTEL_I7505_ERRSTS	0x42
-#define INTEL_I7505_AGPCTRL	0x70
-#define INTEL_I7505_MCHCFG	0x50
-
-#define SNB_GMCH_CTRL	0x50
-#define SNB_GMCH_GMS_STOLEN_MASK	0xF8
-#define SNB_GMCH_GMS_STOLEN_32M		(1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M		(2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M		(3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M	(4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M	(5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M	(6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M	(7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M	(8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M	(9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M	(0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M	(0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M	(0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M	(0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M	(0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M	(0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M	(0x10 << 3)
-#define SNB_GTT_SIZE_0M			(0 << 8)
-#define SNB_GTT_SIZE_1M			(1 << 8)
-#define SNB_GTT_SIZE_2M			(2 << 8)
-#define SNB_GTT_SIZE_MASK		(3 << 8)
-
-static const struct aper_size_info_fixed intel_i810_sizes[] =
-{
-	{64, 16384, 4},
-	/* The 32M mode still requires a 64k gatt */
-	{32, 8192, 4}
-};
-
-#define AGP_DCACHE_MEMORY	1
-#define AGP_PHYS_MEMORY		2
-#define INTEL_AGP_CACHED_MEMORY 3
-
-static struct gatt_mask intel_i810_masks[] =
-{
-	{.mask = I810_PTE_VALID, .type = 0},
-	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
-	{.mask = I810_PTE_VALID, .type = 0},
-	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
-	 .type = INTEL_AGP_CACHED_MEMORY}
-};
-
-static struct _intel_private {
-	struct pci_dev *pcidev;	/* device one */
-	u8 __iomem *registers;
-	u32 __iomem *gtt;		/* I915G */
-	int num_dcache_entries;
-	/* gtt_entries is the number of gtt entries that are already mapped
-	 * to stolen memory.  Stolen memory is larger than the memory mapped
-	 * through gtt_entries, as it includes some reserved space for the BIOS
-	 * popup and for the GTT.
-	 */
-	int gtt_entries;			/* i830+ */
-	int gtt_total_size;
-	union {
-		void __iomem *i9xx_flush_page;
-		void *i8xx_flush_page;
-	};
-	struct page *i8xx_page;
-	struct resource ifp_resource;
-	int resource_valid;
-} intel_private;
-
-#ifdef USE_PCI_DMA_API
-static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
-{
-	*ret = pci_map_page(intel_private.pcidev, page, 0,
-			    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(intel_private.pcidev, *ret))
-		return -EINVAL;
-	return 0;
-}
-
-static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
-{
-	pci_unmap_page(intel_private.pcidev, dma,
-		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-}
-
-static void intel_agp_free_sglist(struct agp_memory *mem)
-{
-	struct sg_table st;
-
-	st.sgl = mem->sg_list;
-	st.orig_nents = st.nents = mem->page_count;
-
-	sg_free_table(&st);
-
-	mem->sg_list = NULL;
-	mem->num_sg = 0;
-}
-
-static int intel_agp_map_memory(struct agp_memory *mem)
-{
-	struct sg_table st;
-	struct scatterlist *sg;
-	int i;
-
-	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
-
-	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
-		return -ENOMEM;
-
-	mem->sg_list = sg = st.sgl;
-
-	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
-		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
-
-	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
-				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
-	if (unlikely(!mem->num_sg)) {
-		intel_agp_free_sglist(mem);
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-static void intel_agp_unmap_memory(struct agp_memory *mem)
-{
-	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
-
-	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
-		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
-	intel_agp_free_sglist(mem);
-}
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
-					off_t pg_start, int mask_type)
-{
-	struct scatterlist *sg;
-	int i, j;
-
-	j = pg_start;
-
-	WARN_ON(!mem->num_sg);
-
-	if (mem->num_sg == mem->page_count) {
-		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
-			writel(agp_bridge->driver->mask_memory(agp_bridge,
-					sg_dma_address(sg), mask_type),
-					intel_private.gtt+j);
-			j++;
-		}
-	} else {
-		/* sg may merge pages, but we have to separate
-		 * per-page addr for GTT */
-		unsigned int len, m;
-
-		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
-			len = sg_dma_len(sg) / PAGE_SIZE;
-			for (m = 0; m < len; m++) {
-				writel(agp_bridge->driver->mask_memory(agp_bridge,
-								       sg_dma_address(sg) + m * PAGE_SIZE,
-								       mask_type),
-				       intel_private.gtt+j);
-				j++;
-			}
-		}
-	}
-	readl(intel_private.gtt+j-1);
-}
-
-#else
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
-					off_t pg_start, int mask_type)
-{
-	int i, j;
-	u32 cache_bits = 0;
-
-	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
-	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
-	{
-		cache_bits = I830_PTE_SYSTEM_CACHED;
-	}
-
-	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		writel(agp_bridge->driver->mask_memory(agp_bridge,
-				page_to_phys(mem->pages[i]), mask_type),
-		       intel_private.gtt+j);
-	}
-
-	readl(intel_private.gtt+j-1);
-}
-
-#endif
-
-static int intel_i810_fetch_size(void)
-{
-	u32 smram_miscc;
-	struct aper_size_info_fixed *values;
-
-	pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
-	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
-	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
-		dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
-		return 0;
-	}
-	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
-		agp_bridge->previous_size =
-			agp_bridge->current_size = (void *) (values + 1);
-		agp_bridge->aperture_size_idx = 1;
-		return values[1].size;
-	} else {
-		agp_bridge->previous_size =
-			agp_bridge->current_size = (void *) (values);
-		agp_bridge->aperture_size_idx = 0;
-		return values[0].size;
-	}
-
-	return 0;
-}
-
-static int intel_i810_configure(void)
-{
-	struct aper_size_info_fixed *current_size;
-	u32 temp;
-	int i;
-
-	current_size = A_SIZE_FIX(agp_bridge->current_size);
-
-	if (!intel_private.registers) {
-		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
-		temp &= 0xfff80000;
-
-		intel_private.registers = ioremap(temp, 128 * 4096);
-		if (!intel_private.registers) {
-			dev_err(&intel_private.pcidev->dev,
-				"can't remap memory\n");
-			return -ENOMEM;
-		}
-	}
-
-	if ((readl(intel_private.registers+I810_DRAM_CTL)
-		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
-		/* This will need to be dynamically assigned */
-		dev_info(&intel_private.pcidev->dev,
-			 "detected 4MB dedicated video ram\n");
-		intel_private.num_dcache_entries = 1024;
-	}
-	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
-	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
-	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
-
-	if (agp_bridge->driver->needs_scratch_page) {
-		for (i = 0; i < current_size->num_entries; i++) {
-			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
-		}
-		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting. */
-	}
-	global_cache_flush();
-	return 0;
-}
-
-static void intel_i810_cleanup(void)
-{
-	writel(0, intel_private.registers+I810_PGETBL_CTL);
-	readl(intel_private.registers);	/* PCI Posting. */
-	iounmap(intel_private.registers);
-}
-
-static void intel_i810_tlbflush(struct agp_memory *mem)
-{
-	return;
-}
-
-static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
-{
-	return;
-}
-
-/* Exists to support ARGB cursors */
-static struct page *i8xx_alloc_pages(void)
-{
-	struct page *page;
-
-	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
-	if (page == NULL)
-		return NULL;
-
-	if (set_pages_uc(page, 4) < 0) {
-		set_pages_wb(page, 4);
-		__free_pages(page, 2);
-		return NULL;
-	}
-	get_page(page);
-	atomic_inc(&agp_bridge->current_memory_agp);
-	return page;
-}
-
-static void i8xx_destroy_pages(struct page *page)
-{
-	if (page == NULL)
-		return;
-
-	set_pages_wb(page, 4);
-	put_page(page);
-	__free_pages(page, 2);
-	atomic_dec(&agp_bridge->current_memory_agp);
-}
-
-static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
-					int type)
-{
-	if (type < AGP_USER_TYPES)
-		return type;
-	else if (type == AGP_USER_CACHED_MEMORY)
-		return INTEL_AGP_CACHED_MEMORY;
-	else
-		return 0;
-}
-
-static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
-				int type)
-{
-	int i, j, num_entries;
-	void *temp;
-	int ret = -EINVAL;
-	int mask_type;
-
-	if (mem->page_count == 0)
-		goto out;
-
-	temp = agp_bridge->current_size;
-	num_entries = A_SIZE_FIX(temp)->num_entries;
-
-	if ((pg_start + mem->page_count) > num_entries)
-		goto out_err;
-
-
-	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
-		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
-			ret = -EBUSY;
-			goto out_err;
-		}
-	}
-
-	if (type != mem->type)
-		goto out_err;
-
-	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
-	switch (mask_type) {
-	case AGP_DCACHE_MEMORY:
-		if (!mem->is_flushed)
-			global_cache_flush();
-		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
-			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
-			       intel_private.registers+I810_PTE_BASE+(i*4));
-		}
-		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-		break;
-	case AGP_PHYS_MEMORY:
-	case AGP_NORMAL_MEMORY:
-		if (!mem->is_flushed)
-			global_cache_flush();
-		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-			writel(agp_bridge->driver->mask_memory(agp_bridge,
-					page_to_phys(mem->pages[i]), mask_type),
-			       intel_private.registers+I810_PTE_BASE+(j*4));
-		}
-		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
-		break;
-	default:
-		goto out_err;
-	}
-
-	agp_bridge->driver->tlb_flush(mem);
-out:
-	ret = 0;
-out_err:
-	mem->is_flushed = true;
-	return ret;
-}
-
-static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
-				int type)
-{
-	int i;
-
-	if (mem->page_count == 0)
-		return 0;
-
-	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
-	}
-	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-
-	agp_bridge->driver->tlb_flush(mem);
-	return 0;
-}
-
-/*
- * The i810/i830 requires a physical address to program its mouse
- * pointer into hardware.
- * However the Xserver still writes to it through the agp aperture.
- */
-static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
-{
-	struct agp_memory *new;
-	struct page *page;
-
-	switch (pg_count) {
-	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
-		break;
-	case 4:
-		/* kludge to get 4 physical pages for ARGB cursor */
-		page = i8xx_alloc_pages();
-		break;
-	default:
-		return NULL;
-	}
-
-	if (page == NULL)
-		return NULL;
-
-	new = agp_create_memory(pg_count);
-	if (new == NULL)
-		return NULL;
-
-	new->pages[0] = page;
-	if (pg_count == 4) {
-		/* kludge to get 4 physical pages for ARGB cursor */
-		new->pages[1] = new->pages[0] + 1;
-		new->pages[2] = new->pages[1] + 1;
-		new->pages[3] = new->pages[2] + 1;
-	}
-	new->page_count = pg_count;
-	new->num_scratch_pages = pg_count;
-	new->type = AGP_PHYS_MEMORY;
-	new->physical = page_to_phys(new->pages[0]);
-	return new;
-}
-
-static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
-{
-	struct agp_memory *new;
-
-	if (type == AGP_DCACHE_MEMORY) {
-		if (pg_count != intel_private.num_dcache_entries)
-			return NULL;
-
-		new = agp_create_memory(1);
-		if (new == NULL)
-			return NULL;
-
-		new->type = AGP_DCACHE_MEMORY;
-		new->page_count = pg_count;
-		new->num_scratch_pages = 0;
-		agp_free_page_array(new);
-		return new;
-	}
-	if (type == AGP_PHYS_MEMORY)
-		return alloc_agpphysmem_i8xx(pg_count, type);
-	return NULL;
-}
-
-static void intel_i810_free_by_type(struct agp_memory *curr)
-{
-	agp_free_key(curr->key);
-	if (curr->type == AGP_PHYS_MEMORY) {
-		if (curr->page_count == 4)
-			i8xx_destroy_pages(curr->pages[0]);
-		else {
-			agp_bridge->driver->agp_destroy_page(curr->pages[0],
-							     AGP_PAGE_DESTROY_UNMAP);
-			agp_bridge->driver->agp_destroy_page(curr->pages[0],
-							     AGP_PAGE_DESTROY_FREE);
-		}
-		agp_free_page_array(curr);
-	}
-	kfree(curr);
-}
-
-static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
-					    dma_addr_t addr, int type)
-{
-	/* Type checking must be done elsewhere */
-	return addr | bridge->driver->masks[type].mask;
-}
-
-static struct aper_size_info_fixed intel_i830_sizes[] =
-{
-	{128, 32768, 5},
-	/* The 64M mode still requires a 128k gatt */
-	{64, 16384, 5},
-	{256, 65536, 6},
-	{512, 131072, 7},
-};
-
-static void intel_i830_init_gtt_entries(void)
-{
-	u16 gmch_ctrl;
-	int gtt_entries = 0;
-	u8 rdct;
-	int local = 0;
-	static const int ddt[4] = { 0, 16, 32, 64 };
-	int size; /* reserved space (in kb) at the top of stolen memory */
-
-	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-
-	if (IS_I965) {
-		u32 pgetbl_ctl;
-		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
-
-		/* The 965 has a field telling us the size of the GTT,
-		 * which may be larger than what is necessary to map the
-		 * aperture.
-		 */
-		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
-		case I965_PGETBL_SIZE_128KB:
-			size = 128;
-			break;
-		case I965_PGETBL_SIZE_256KB:
-			size = 256;
-			break;
-		case I965_PGETBL_SIZE_512KB:
-			size = 512;
-			break;
-		case I965_PGETBL_SIZE_1MB:
-			size = 1024;
-			break;
-		case I965_PGETBL_SIZE_2MB:
-			size = 2048;
-			break;
-		case I965_PGETBL_SIZE_1_5MB:
-			size = 1024 + 512;
-			break;
-		default:
-			dev_info(&intel_private.pcidev->dev,
-				 "unknown page table size, assuming 512KB\n");
-			size = 512;
-		}
-		size += 4; /* add in BIOS popup space */
-	} else if (IS_G33 && !IS_PINEVIEW) {
-	/* G33's GTT size defined in gmch_ctrl */
-		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
-		case G33_PGETBL_SIZE_1M:
-			size = 1024;
-			break;
-		case G33_PGETBL_SIZE_2M:
-			size = 2048;
-			break;
-		default:
-			dev_info(&agp_bridge->dev->dev,
-				 "unknown page table size 0x%x, assuming 512KB\n",
-				(gmch_ctrl & G33_PGETBL_SIZE_MASK));
-			size = 512;
-		}
-		size += 4;
-	} else if (IS_G4X || IS_PINEVIEW) {
-		/* On 4 series hardware, GTT stolen is separate from graphics
-		 * stolen, ignore it in stolen gtt entries counting.  However,
-		 * 4KB of the stolen memory doesn't get mapped to the GTT.
-		 */
-		size = 4;
-	} else {
-		/* On previous hardware, the GTT size was just what was
-		 * required to map the aperture.
-		 */
-		size = agp_bridge->driver->fetch_size() + 4;
-	}
-
-	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
-	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
-		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
-		case I830_GMCH_GMS_STOLEN_512:
-			gtt_entries = KB(512) - KB(size);
-			break;
-		case I830_GMCH_GMS_STOLEN_1024:
-			gtt_entries = MB(1) - KB(size);
-			break;
-		case I830_GMCH_GMS_STOLEN_8192:
-			gtt_entries = MB(8) - KB(size);
-			break;
-		case I830_GMCH_GMS_LOCAL:
-			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
-			gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
-					MB(ddt[I830_RDRAM_DDT(rdct)]);
-			local = 1;
-			break;
-		default:
-			gtt_entries = 0;
-			break;
-		}
-	} else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
-		   agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
-		/*
-		 * SandyBridge has new memory control reg at 0x50.w
-		 */
-		u16 snb_gmch_ctl;
-		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
-		case SNB_GMCH_GMS_STOLEN_32M:
-			gtt_entries = MB(32) - KB(size);
-			break;
-		case SNB_GMCH_GMS_STOLEN_64M:
-			gtt_entries = MB(64) - KB(size);
-			break;
-		case SNB_GMCH_GMS_STOLEN_96M:
-			gtt_entries = MB(96) - KB(size);
-			break;
-		case SNB_GMCH_GMS_STOLEN_128M:
-			gtt_entries = MB(128) - KB(size);
-			break;
-		case SNB_GMCH_GMS_STOLEN_160M:
-			gtt_entries = MB(160) - KB(size);
-			break;
-		case SNB_GMCH_GMS_STOLEN_192M:
-			gtt_entries = MB(192) - KB(size);
-			break;
-		case SNB_GMCH_GMS_STOLEN_224M:
-			gtt_entries = MB(224) - KB(size);
-			break;
-		case SNB_GMCH_GMS_STOLEN_256M:
-			gtt_entries = MB(256) - KB(size);
-			break;
-		case SNB_GMCH_GMS_STOLEN_288M:
-			gtt_entries = MB(288) - KB(size);
-			break;
-		case SNB_GMCH_GMS_STOLEN_320M:
-			gtt_entries = MB(320) - KB(size);
-			break;
-		case SNB_GMCH_GMS_STOLEN_352M:
-			gtt_entries = MB(352) - KB(size);
-			break;
-		case SNB_GMCH_GMS_STOLEN_384M:
-			gtt_entries = MB(384) - KB(size);
-			break;
-		case SNB_GMCH_GMS_STOLEN_416M:
-			gtt_entries = MB(416) - KB(size);
-			break;
-		case SNB_GMCH_GMS_STOLEN_448M:
-			gtt_entries = MB(448) - KB(size);
-			break;
-		case SNB_GMCH_GMS_STOLEN_480M:
-			gtt_entries = MB(480) - KB(size);
-			break;
-		case SNB_GMCH_GMS_STOLEN_512M:
-			gtt_entries = MB(512) - KB(size);
-			break;
-		}
-	} else {
-		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
-		case I855_GMCH_GMS_STOLEN_1M:
-			gtt_entries = MB(1) - KB(size);
-			break;
-		case I855_GMCH_GMS_STOLEN_4M:
-			gtt_entries = MB(4) - KB(size);
-			break;
-		case I855_GMCH_GMS_STOLEN_8M:
-			gtt_entries = MB(8) - KB(size);
-			break;
-		case I855_GMCH_GMS_STOLEN_16M:
-			gtt_entries = MB(16) - KB(size);
-			break;
-		case I855_GMCH_GMS_STOLEN_32M:
-			gtt_entries = MB(32) - KB(size);
-			break;
-		case I915_GMCH_GMS_STOLEN_48M:
-			/* Check it's really I915G */
-			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
-				gtt_entries = MB(48) - KB(size);
-			else
-				gtt_entries = 0;
-			break;
-		case I915_GMCH_GMS_STOLEN_64M:
-			/* Check it's really I915G */
-			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
-				gtt_entries = MB(64) - KB(size);
-			else
-				gtt_entries = 0;
-			break;
-		case G33_GMCH_GMS_STOLEN_128M:
-			if (IS_G33 || IS_I965 || IS_G4X)
-				gtt_entries = MB(128) - KB(size);
-			else
-				gtt_entries = 0;
-			break;
-		case G33_GMCH_GMS_STOLEN_256M:
-			if (IS_G33 || IS_I965 || IS_G4X)
-				gtt_entries = MB(256) - KB(size);
-			else
-				gtt_entries = 0;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_96M:
-			if (IS_I965 || IS_G4X)
-				gtt_entries = MB(96) - KB(size);
-			else
-				gtt_entries = 0;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_160M:
-			if (IS_I965 || IS_G4X)
-				gtt_entries = MB(160) - KB(size);
-			else
-				gtt_entries = 0;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_224M:
-			if (IS_I965 || IS_G4X)
-				gtt_entries = MB(224) - KB(size);
-			else
-				gtt_entries = 0;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_352M:
-			if (IS_I965 || IS_G4X)
-				gtt_entries = MB(352) - KB(size);
-			else
-				gtt_entries = 0;
-			break;
-		default:
-			gtt_entries = 0;
-			break;
-		}
-	}
-	if (gtt_entries > 0) {
-		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
-		       gtt_entries / KB(1), local ? "local" : "stolen");
-		gtt_entries /= KB(4);
-	} else {
-		dev_info(&agp_bridge->dev->dev,
-		       "no pre-allocated video memory detected\n");
-		gtt_entries = 0;
-	}
-
-	intel_private.gtt_entries = gtt_entries;
-}
-
-static void intel_i830_fini_flush(void)
-{
-	kunmap(intel_private.i8xx_page);
-	intel_private.i8xx_flush_page = NULL;
-	unmap_page_from_agp(intel_private.i8xx_page);
-
-	__free_page(intel_private.i8xx_page);
-	intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
-	/* return if we've already set the flush mechanism up */
-	if (intel_private.i8xx_page)
-		return;
-
-	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
-	if (!intel_private.i8xx_page)
-		return;
-
-	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
-	if (!intel_private.i8xx_flush_page)
-		intel_i830_fini_flush();
-}
-
-/* The chipset_flush interface needs to get data that has already been
- * flushed out of the CPU all the way out to main memory, because the GPU
- * doesn't snoop those buffers.
- *
- * The 8xx series doesn't have the same lovely interface for flushing the
- * chipset write buffers that the later chips do. According to the 865
- * specs, it's 64 octwords, or 1KB.  So, to get those previous things in
- * that buffer out, we just fill 1KB and clflush it out, on the assumption
- * that it'll push whatever was in there out.  It appears to work.
- */
-static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
-{
-	unsigned int *pg = intel_private.i8xx_flush_page;
-
-	memset(pg, 0, 1024);
-
-	if (cpu_has_clflush)
-		clflush_cache_range(pg, 1024);
-	else if (wbinvd_on_all_cpus() != 0)
-		printk(KERN_ERR "Timed out waiting for cache flush.\n");
-}
-
-/* The intel i830 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
-{
-	int page_order;
-	struct aper_size_info_fixed *size;
-	int num_entries;
-	u32 temp;
-
-	size = agp_bridge->current_size;
-	page_order = size->page_order;
-	num_entries = size->num_entries;
-	agp_bridge->gatt_table_real = NULL;
-
-	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
-	temp &= 0xfff80000;
-
-	intel_private.registers = ioremap(temp, 128 * 4096);
-	if (!intel_private.registers)
-		return -ENOMEM;
-
-	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-	global_cache_flush();	/* FIXME: ?? */
-
-	/* we have to call this as early as possible after the MMIO base address is known */
-	intel_i830_init_gtt_entries();
-
-	agp_bridge->gatt_table = NULL;
-
-	agp_bridge->gatt_bus_addr = temp;
-
-	return 0;
-}
-
-/* Return the gatt table to a sane state. Use the top of stolen
- * memory for the GTT.
- */
-static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
-{
-	return 0;
-}
-
-static int intel_i830_fetch_size(void)
-{
-	u16 gmch_ctrl;
-	struct aper_size_info_fixed *values;
-
-	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
-	if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
-	    agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
-		/* 855GM/852GM/865G has 128MB aperture size */
-		agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
-		agp_bridge->aperture_size_idx = 0;
-		return values[0].size;
-	}
-
-	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-
-	if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
-		agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
-		agp_bridge->aperture_size_idx = 0;
-		return values[0].size;
-	} else {
-		agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
-		agp_bridge->aperture_size_idx = 1;
-		return values[1].size;
-	}
-
-	return 0;
-}
-
-static int intel_i830_configure(void)
-{
-	struct aper_size_info_fixed *current_size;
-	u32 temp;
-	u16 gmch_ctrl;
-	int i;
-
-	current_size = A_SIZE_FIX(agp_bridge->current_size);
-
-	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
-	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-	gmch_ctrl |= I830_GMCH_ENABLED;
-	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
-
-	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
-	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
-
-	if (agp_bridge->driver->needs_scratch_page) {
-		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
-			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
-		}
-		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI Posting. */
-	}
-
-	global_cache_flush();
-
-	intel_i830_setup_flush();
-	return 0;
-}
-
-static void intel_i830_cleanup(void)
-{
-	iounmap(intel_private.registers);
-}
-
-static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
-				     int type)
-{
-	int i, j, num_entries;
-	void *temp;
-	int ret = -EINVAL;
-	int mask_type;
-
-	if (mem->page_count == 0)
-		goto out;
-
-	temp = agp_bridge->current_size;
-	num_entries = A_SIZE_FIX(temp)->num_entries;
-
-	if (pg_start < intel_private.gtt_entries) {
-		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
-			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
-			   pg_start, intel_private.gtt_entries);
-
-		dev_info(&intel_private.pcidev->dev,
-			 "trying to insert into local/stolen memory\n");
-		goto out_err;
-	}
-
-	if ((pg_start + mem->page_count) > num_entries)
-		goto out_err;
-
-	/* The i830 can't check the GTT for entries since its read only,
-	 * depend on the caller to make the correct offset decisions.
-	 */
-
-	if (type != mem->type)
-		goto out_err;
-
-	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
-	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
-	    mask_type != INTEL_AGP_CACHED_MEMORY)
-		goto out_err;
-
-	if (!mem->is_flushed)
-		global_cache_flush();
-
-	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		writel(agp_bridge->driver->mask_memory(agp_bridge,
-				page_to_phys(mem->pages[i]), mask_type),
-		       intel_private.registers+I810_PTE_BASE+(j*4));
-	}
-	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
-	agp_bridge->driver->tlb_flush(mem);
-
-out:
-	ret = 0;
-out_err:
-	mem->is_flushed = true;
-	return ret;
-}
-
-static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
-				     int type)
-{
-	int i;
-
-	if (mem->page_count == 0)
-		return 0;
-
-	if (pg_start < intel_private.gtt_entries) {
-		dev_info(&intel_private.pcidev->dev,
-			 "trying to disable local/stolen memory\n");
-		return -EINVAL;
-	}
-
-	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
-	}
-	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-
-	agp_bridge->driver->tlb_flush(mem);
-	return 0;
-}
-
-static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
-{
-	if (type == AGP_PHYS_MEMORY)
-		return alloc_agpphysmem_i8xx(pg_count, type);
-	/* always return NULL for other allocation types for now */
-	return NULL;
-}
-
-static int intel_alloc_chipset_flush_resource(void)
-{
-	int ret;
-	ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
-				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
-				     pcibios_align_resource, agp_bridge->dev);
-
-	return ret;
-}
-
-static void intel_i915_setup_chipset_flush(void)
-{
-	int ret;
-	u32 temp;
-
-	pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
-	if (!(temp & 0x1)) {
-		intel_alloc_chipset_flush_resource();
-		intel_private.resource_valid = 1;
-		pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
-	} else {
-		temp &= ~1;
-
-		intel_private.resource_valid = 1;
-		intel_private.ifp_resource.start = temp;
-		intel_private.ifp_resource.end = temp + PAGE_SIZE;
-		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
-		/* some BIOSes reserve this area in a pnp some don't */
-		if (ret)
-			intel_private.resource_valid = 0;
-	}
-}
-
-static void intel_i965_g33_setup_chipset_flush(void)
-{
-	u32 temp_hi, temp_lo;
-	int ret;
-
-	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
-	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
-
-	if (!(temp_lo & 0x1)) {
-
-		intel_alloc_chipset_flush_resource();
-
-		intel_private.resource_valid = 1;
-		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
-			upper_32_bits(intel_private.ifp_resource.start));
-		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
-	} else {
-		u64 l64;
-
-		temp_lo &= ~0x1;
-		l64 = ((u64)temp_hi << 32) | temp_lo;
-
-		intel_private.resource_valid = 1;
-		intel_private.ifp_resource.start = l64;
-		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
-		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
-		/* some BIOSes reserve this area in a pnp some don't */
-		if (ret)
-			intel_private.resource_valid = 0;
-	}
-}
-
-static void intel_i9xx_setup_flush(void)
-{
-	/* return if already configured */
-	if (intel_private.ifp_resource.start)
-		return;
-
-	if (IS_SNB)
-		return;
-
-	/* setup a resource for this object */
-	intel_private.ifp_resource.name = "Intel Flush Page";
-	intel_private.ifp_resource.flags = IORESOURCE_MEM;
-
-	/* Setup chipset flush for 915 */
-	if (IS_I965 || IS_G33 || IS_G4X) {
-		intel_i965_g33_setup_chipset_flush();
-	} else {
-		intel_i915_setup_chipset_flush();
-	}
-
-	if (intel_private.ifp_resource.start) {
-		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
-		if (!intel_private.i9xx_flush_page)
-			dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
-	}
-}
-
-static int intel_i915_configure(void)
-{
-	struct aper_size_info_fixed *current_size;
-	u32 temp;
-	u16 gmch_ctrl;
-	int i;
-
-	current_size = A_SIZE_FIX(agp_bridge->current_size);
-
-	pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
-
-	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-	gmch_ctrl |= I830_GMCH_ENABLED;
-	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
-
-	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
-	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
-
-	if (agp_bridge->driver->needs_scratch_page) {
-		for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
-			writel(agp_bridge->scratch_page, intel_private.gtt+i);
-		}
-		readl(intel_private.gtt+i-1);	/* PCI Posting. */
-	}
-
-	global_cache_flush();
-
-	intel_i9xx_setup_flush();
-
-	return 0;
-}
-
-static void intel_i915_cleanup(void)
-{
-	if (intel_private.i9xx_flush_page)
-		iounmap(intel_private.i9xx_flush_page);
-	if (intel_private.resource_valid)
-		release_resource(&intel_private.ifp_resource);
-	intel_private.ifp_resource.start = 0;
-	intel_private.resource_valid = 0;
-	iounmap(intel_private.gtt);
-	iounmap(intel_private.registers);
-}
-
-static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
-{
-	if (intel_private.i9xx_flush_page)
-		writel(1, intel_private.i9xx_flush_page);
-}
-
-static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
-				     int type)
-{
-	int num_entries;
-	void *temp;
-	int ret = -EINVAL;
-	int mask_type;
-
-	if (mem->page_count == 0)
-		goto out;
-
-	temp = agp_bridge->current_size;
-	num_entries = A_SIZE_FIX(temp)->num_entries;
-
-	if (pg_start < intel_private.gtt_entries) {
-		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
-			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
-			   pg_start, intel_private.gtt_entries);
-
-		dev_info(&intel_private.pcidev->dev,
-			 "trying to insert into local/stolen memory\n");
-		goto out_err;
-	}
-
-	if ((pg_start + mem->page_count) > num_entries)
-		goto out_err;
-
-	/* The i915 can't check the GTT for entries since it's read only;
-	 * depend on the caller to make the correct offset decisions.
-	 */
-
-	if (type != mem->type)
-		goto out_err;
-
-	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
-	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
-	    mask_type != INTEL_AGP_CACHED_MEMORY)
-		goto out_err;
-
-	if (!mem->is_flushed)
-		global_cache_flush();
-
-	intel_agp_insert_sg_entries(mem, pg_start, mask_type);
-	agp_bridge->driver->tlb_flush(mem);
-
- out:
-	ret = 0;
- out_err:
-	mem->is_flushed = true;
-	return ret;
-}
-
-static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
-				     int type)
-{
-	int i;
-
-	if (mem->page_count == 0)
-		return 0;
-
-	if (pg_start < intel_private.gtt_entries) {
-		dev_info(&intel_private.pcidev->dev,
-			 "trying to disable local/stolen memory\n");
-		return -EINVAL;
-	}
-
-	for (i = pg_start; i < (mem->page_count + pg_start); i++)
-		writel(agp_bridge->scratch_page, intel_private.gtt+i);
-
-	readl(intel_private.gtt+i-1);
-
-	agp_bridge->driver->tlb_flush(mem);
-	return 0;
-}
-
-/* Return the aperture size by just checking the resource length.  The effect
- * described in the spec of the MSAC registers is just changing of the
- * resource size.
- */
-static int intel_i9xx_fetch_size(void)
-{
-	int num_sizes = ARRAY_SIZE(intel_i830_sizes);
-	int aper_size; /* size in megabytes */
-	int i;
-
-	aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
-
-	for (i = 0; i < num_sizes; i++) {
-		if (aper_size == intel_i830_sizes[i].size) {
-			agp_bridge->current_size = intel_i830_sizes + i;
-			agp_bridge->previous_size = agp_bridge->current_size;
-			return aper_size;
-		}
-	}
-
-	return 0;
-}
-
-/* The intel i915 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
-{
-	int page_order;
-	struct aper_size_info_fixed *size;
-	int num_entries;
-	u32 temp, temp2;
-	int gtt_map_size = 256 * 1024;
-
-	size = agp_bridge->current_size;
-	page_order = size->page_order;
-	num_entries = size->num_entries;
-	agp_bridge->gatt_table_real = NULL;
-
-	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
-	pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
-
-	if (IS_G33)
-	    gtt_map_size = 1024 * 1024; /* 1M on G33 */
-	intel_private.gtt = ioremap(temp2, gtt_map_size);
-	if (!intel_private.gtt)
-		return -ENOMEM;
-
-	intel_private.gtt_total_size = gtt_map_size / 4;
-
-	temp &= 0xfff80000;
-
-	intel_private.registers = ioremap(temp, 128 * 4096);
-	if (!intel_private.registers) {
-		iounmap(intel_private.gtt);
-		return -ENOMEM;
-	}
-
-	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-	global_cache_flush();	/* FIXME: ? */
-
-	/* we have to call this as early as possible after the MMIO base address is known */
-	intel_i830_init_gtt_entries();
-
-	agp_bridge->gatt_table = NULL;
-
-	agp_bridge->gatt_bus_addr = temp;
-
-	return 0;
-}
-
-/*
- * The i965 supports 36-bit physical addresses, but to keep
- * the format of the GTT the same, the bits that don't fit
- * in a 32-bit word are shifted down to bits 4..7.
- *
- * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
- * is always zero on 32-bit architectures, so no need to make
- * this conditional.
- */
-static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
-					    dma_addr_t addr, int type)
-{
-	/* Shift high bits down */
-	addr |= (addr >> 28) & 0xf0;
-
-	/* Type checking must be done elsewhere */
-	return addr | bridge->driver->masks[type].mask;
-}
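The packing above can be sanity-checked in isolation. The snippet below is a minimal stand-alone sketch, not part of the patch; the bare 0x1 valid bit merely stands in for the driver's masks[type].mask lookup and is an assumption made for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 36-bit, page-aligned physical address: 0x8_1234_5000 */
	uint64_t addr = 0x812345000ULL;
	uint32_t pte;

	/* Move address bits 32..35 down into PTE bits 4..7 */
	addr |= (addr >> 28) & 0xf0;

	/* OR in a valid bit (stand-in for masks[type].mask) */
	pte = (uint32_t)addr | 0x1;

	printf("pte = 0x%08x\n", (unsigned)pte);	/* prints: pte = 0x12345081 */
	return 0;
}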
-
-static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
-{
-	u16 snb_gmch_ctl;
-
-	switch (agp_bridge->dev->device) {
-	case PCI_DEVICE_ID_INTEL_GM45_HB:
-	case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
-	case PCI_DEVICE_ID_INTEL_Q45_HB:
-	case PCI_DEVICE_ID_INTEL_G45_HB:
-	case PCI_DEVICE_ID_INTEL_G41_HB:
-	case PCI_DEVICE_ID_INTEL_B43_HB:
-	case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
-	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
-	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
-	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
-		*gtt_offset = *gtt_size = MB(2);
-		break;
-	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
-	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
-		*gtt_offset = MB(2);
-
-		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
-		default:
-		case SNB_GTT_SIZE_0M:
-			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
-			*gtt_size = MB(0);
-			break;
-		case SNB_GTT_SIZE_1M:
-			*gtt_size = MB(1);
-			break;
-		case SNB_GTT_SIZE_2M:
-			*gtt_size = MB(2);
-			break;
-		}
-		break;
-	default:
-		*gtt_offset = *gtt_size = KB(512);
-	}
-}
-
-/* The intel i965 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
-{
-	int page_order;
-	struct aper_size_info_fixed *size;
-	int num_entries;
-	u32 temp;
-	int gtt_offset, gtt_size;
-
-	size = agp_bridge->current_size;
-	page_order = size->page_order;
-	num_entries = size->num_entries;
-	agp_bridge->gatt_table_real = NULL;
-
-	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
-
-	temp &= 0xfff00000;
-
-	intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
-
-	intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
-
-	if (!intel_private.gtt)
-		return -ENOMEM;
-
-	intel_private.gtt_total_size = gtt_size / 4;
-
-	intel_private.registers = ioremap(temp, 128 * 4096);
-	if (!intel_private.registers) {
-		iounmap(intel_private.gtt);
-		return -ENOMEM;
-	}
-
-	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-	global_cache_flush();   /* FIXME: ? */
-
-	/* we have to call this as early as possible after the MMIO base address is known */
-	intel_i830_init_gtt_entries();
-
-	agp_bridge->gatt_table = NULL;
-
-	agp_bridge->gatt_bus_addr = temp;
-
-	return 0;
-}
-
-
 static int intel_fetch_size(void)
 {
 	int i;
@@ -1982,6 +464,7 @@
 	.aperture_sizes		= intel_generic_sizes,
 	.size_type		= U16_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= intel_configure,
 	.fetch_size		= intel_fetch_size,
 	.cleanup		= intel_cleanup,
@@ -2003,38 +486,12 @@
 	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
-static const struct agp_bridge_driver intel_810_driver = {
-	.owner			= THIS_MODULE,
-	.aperture_sizes		= intel_i810_sizes,
-	.size_type		= FIXED_APER_SIZE,
-	.num_aperture_sizes	= 2,
-	.needs_scratch_page	= true,
-	.configure		= intel_i810_configure,
-	.fetch_size		= intel_i810_fetch_size,
-	.cleanup		= intel_i810_cleanup,
-	.tlb_flush		= intel_i810_tlbflush,
-	.mask_memory		= intel_i810_mask_memory,
-	.masks			= intel_i810_masks,
-	.agp_enable		= intel_i810_agp_enable,
-	.cache_flush		= global_cache_flush,
-	.create_gatt_table	= agp_generic_create_gatt_table,
-	.free_gatt_table	= agp_generic_free_gatt_table,
-	.insert_memory		= intel_i810_insert_entries,
-	.remove_memory		= intel_i810_remove_entries,
-	.alloc_by_type		= intel_i810_alloc_by_type,
-	.free_by_type		= intel_i810_free_by_type,
-	.agp_alloc_page		= agp_generic_alloc_page,
-	.agp_alloc_pages        = agp_generic_alloc_pages,
-	.agp_destroy_page	= agp_generic_destroy_page,
-	.agp_destroy_pages      = agp_generic_destroy_pages,
-	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
-};
-
 static const struct agp_bridge_driver intel_815_driver = {
 	.owner			= THIS_MODULE,
 	.aperture_sizes		= intel_815_sizes,
 	.size_type		= U8_APER_SIZE,
 	.num_aperture_sizes	= 2,
+	.needs_scratch_page	= true,
 	.configure		= intel_815_configure,
 	.fetch_size		= intel_815_fetch_size,
 	.cleanup		= intel_8xx_cleanup,
@@ -2056,39 +513,12 @@
 	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
-static const struct agp_bridge_driver intel_830_driver = {
-	.owner			= THIS_MODULE,
-	.aperture_sizes		= intel_i830_sizes,
-	.size_type		= FIXED_APER_SIZE,
-	.num_aperture_sizes	= 4,
-	.needs_scratch_page	= true,
-	.configure		= intel_i830_configure,
-	.fetch_size		= intel_i830_fetch_size,
-	.cleanup		= intel_i830_cleanup,
-	.tlb_flush		= intel_i810_tlbflush,
-	.mask_memory		= intel_i810_mask_memory,
-	.masks			= intel_i810_masks,
-	.agp_enable		= intel_i810_agp_enable,
-	.cache_flush		= global_cache_flush,
-	.create_gatt_table	= intel_i830_create_gatt_table,
-	.free_gatt_table	= intel_i830_free_gatt_table,
-	.insert_memory		= intel_i830_insert_entries,
-	.remove_memory		= intel_i830_remove_entries,
-	.alloc_by_type		= intel_i830_alloc_by_type,
-	.free_by_type		= intel_i810_free_by_type,
-	.agp_alloc_page		= agp_generic_alloc_page,
-	.agp_alloc_pages        = agp_generic_alloc_pages,
-	.agp_destroy_page	= agp_generic_destroy_page,
-	.agp_destroy_pages      = agp_generic_destroy_pages,
-	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
-	.chipset_flush		= intel_i830_chipset_flush,
-};
-
 static const struct agp_bridge_driver intel_820_driver = {
 	.owner			= THIS_MODULE,
 	.aperture_sizes		= intel_8xx_sizes,
 	.size_type		= U8_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= intel_820_configure,
 	.fetch_size		= intel_8xx_fetch_size,
 	.cleanup		= intel_820_cleanup,
@@ -2115,6 +545,7 @@
 	.aperture_sizes		= intel_830mp_sizes,
 	.size_type		= U8_APER_SIZE,
 	.num_aperture_sizes	= 4,
+	.needs_scratch_page	= true,
 	.configure		= intel_830mp_configure,
 	.fetch_size		= intel_8xx_fetch_size,
 	.cleanup		= intel_8xx_cleanup,
@@ -2141,6 +572,7 @@
 	.aperture_sizes		= intel_8xx_sizes,
 	.size_type		= U8_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= intel_840_configure,
 	.fetch_size		= intel_8xx_fetch_size,
 	.cleanup		= intel_8xx_cleanup,
@@ -2167,6 +599,7 @@
 	.aperture_sizes		= intel_8xx_sizes,
 	.size_type		= U8_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= intel_845_configure,
 	.fetch_size		= intel_8xx_fetch_size,
 	.cleanup		= intel_8xx_cleanup,
@@ -2193,6 +626,7 @@
 	.aperture_sizes		= intel_8xx_sizes,
 	.size_type		= U8_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= intel_850_configure,
 	.fetch_size		= intel_8xx_fetch_size,
 	.cleanup		= intel_8xx_cleanup,
@@ -2219,6 +653,7 @@
 	.aperture_sizes		= intel_8xx_sizes,
 	.size_type		= U8_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= intel_860_configure,
 	.fetch_size		= intel_8xx_fetch_size,
 	.cleanup		= intel_8xx_cleanup,
@@ -2240,79 +675,12 @@
 	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
-static const struct agp_bridge_driver intel_915_driver = {
-	.owner			= THIS_MODULE,
-	.aperture_sizes		= intel_i830_sizes,
-	.size_type		= FIXED_APER_SIZE,
-	.num_aperture_sizes	= 4,
-	.needs_scratch_page	= true,
-	.configure		= intel_i915_configure,
-	.fetch_size		= intel_i9xx_fetch_size,
-	.cleanup		= intel_i915_cleanup,
-	.tlb_flush		= intel_i810_tlbflush,
-	.mask_memory		= intel_i810_mask_memory,
-	.masks			= intel_i810_masks,
-	.agp_enable		= intel_i810_agp_enable,
-	.cache_flush		= global_cache_flush,
-	.create_gatt_table	= intel_i915_create_gatt_table,
-	.free_gatt_table	= intel_i830_free_gatt_table,
-	.insert_memory		= intel_i915_insert_entries,
-	.remove_memory		= intel_i915_remove_entries,
-	.alloc_by_type		= intel_i830_alloc_by_type,
-	.free_by_type		= intel_i810_free_by_type,
-	.agp_alloc_page		= agp_generic_alloc_page,
-	.agp_alloc_pages        = agp_generic_alloc_pages,
-	.agp_destroy_page	= agp_generic_destroy_page,
-	.agp_destroy_pages      = agp_generic_destroy_pages,
-	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
-	.chipset_flush		= intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
-	.agp_map_page		= intel_agp_map_page,
-	.agp_unmap_page		= intel_agp_unmap_page,
-	.agp_map_memory		= intel_agp_map_memory,
-	.agp_unmap_memory	= intel_agp_unmap_memory,
-#endif
-};
-
-static const struct agp_bridge_driver intel_i965_driver = {
-	.owner			= THIS_MODULE,
-	.aperture_sizes		= intel_i830_sizes,
-	.size_type		= FIXED_APER_SIZE,
-	.num_aperture_sizes	= 4,
-	.needs_scratch_page	= true,
-	.configure		= intel_i915_configure,
-	.fetch_size		= intel_i9xx_fetch_size,
-	.cleanup		= intel_i915_cleanup,
-	.tlb_flush		= intel_i810_tlbflush,
-	.mask_memory		= intel_i965_mask_memory,
-	.masks			= intel_i810_masks,
-	.agp_enable		= intel_i810_agp_enable,
-	.cache_flush		= global_cache_flush,
-	.create_gatt_table	= intel_i965_create_gatt_table,
-	.free_gatt_table	= intel_i830_free_gatt_table,
-	.insert_memory		= intel_i915_insert_entries,
-	.remove_memory		= intel_i915_remove_entries,
-	.alloc_by_type		= intel_i830_alloc_by_type,
-	.free_by_type		= intel_i810_free_by_type,
-	.agp_alloc_page		= agp_generic_alloc_page,
-	.agp_alloc_pages        = agp_generic_alloc_pages,
-	.agp_destroy_page	= agp_generic_destroy_page,
-	.agp_destroy_pages      = agp_generic_destroy_pages,
-	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
-	.chipset_flush		= intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
-	.agp_map_page		= intel_agp_map_page,
-	.agp_unmap_page		= intel_agp_unmap_page,
-	.agp_map_memory		= intel_agp_map_memory,
-	.agp_unmap_memory	= intel_agp_unmap_memory,
-#endif
-};
-
 static const struct agp_bridge_driver intel_7505_driver = {
 	.owner			= THIS_MODULE,
 	.aperture_sizes		= intel_8xx_sizes,
 	.size_type		= U8_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= intel_7505_configure,
 	.fetch_size		= intel_8xx_fetch_size,
 	.cleanup		= intel_8xx_cleanup,
@@ -2334,40 +702,6 @@
 	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
-static const struct agp_bridge_driver intel_g33_driver = {
-	.owner			= THIS_MODULE,
-	.aperture_sizes		= intel_i830_sizes,
-	.size_type		= FIXED_APER_SIZE,
-	.num_aperture_sizes	= 4,
-	.needs_scratch_page	= true,
-	.configure		= intel_i915_configure,
-	.fetch_size		= intel_i9xx_fetch_size,
-	.cleanup		= intel_i915_cleanup,
-	.tlb_flush		= intel_i810_tlbflush,
-	.mask_memory		= intel_i965_mask_memory,
-	.masks			= intel_i810_masks,
-	.agp_enable		= intel_i810_agp_enable,
-	.cache_flush		= global_cache_flush,
-	.create_gatt_table	= intel_i915_create_gatt_table,
-	.free_gatt_table	= intel_i830_free_gatt_table,
-	.insert_memory		= intel_i915_insert_entries,
-	.remove_memory		= intel_i915_remove_entries,
-	.alloc_by_type		= intel_i830_alloc_by_type,
-	.free_by_type		= intel_i810_free_by_type,
-	.agp_alloc_page		= agp_generic_alloc_page,
-	.agp_alloc_pages        = agp_generic_alloc_pages,
-	.agp_destroy_page	= agp_generic_destroy_page,
-	.agp_destroy_pages      = agp_generic_destroy_pages,
-	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
-	.chipset_flush		= intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
-	.agp_map_page		= intel_agp_map_page,
-	.agp_unmap_page		= intel_agp_unmap_page,
-	.agp_map_memory		= intel_agp_map_memory,
-	.agp_unmap_memory	= intel_agp_unmap_memory,
-#endif
-};
-
 static int find_gmch(u16 device)
 {
 	struct pci_dev *gmch_device;
@@ -2392,103 +726,137 @@
 static const struct intel_driver_description {
 	unsigned int chip_id;
 	unsigned int gmch_chip_id;
-	unsigned int multi_gmch_chip; /* if we have more gfx chip type on this HB. */
 	char *name;
 	const struct agp_bridge_driver *driver;
 	const struct agp_bridge_driver *gmch_driver;
 } intel_agp_chipsets[] = {
-	{ PCI_DEVICE_ID_INTEL_82443LX_0, 0, 0, "440LX", &intel_generic_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82443BX_0, 0, 0, "440BX", &intel_generic_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82443GX_0, 0, 0, "440GX", &intel_generic_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, 0, "i810",
+	{ PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
+	{ PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
+	{ PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
+	{ PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
 		NULL, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, 0, "i810",
+	{ PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
 		NULL, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, 0, "i810",
+	{ PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
 		NULL, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, 0, "i815",
+	{ PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
 		&intel_815_driver, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82820_HB, 0, 0, "i820", &intel_820_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, 0, "i820", &intel_820_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, 0, "830M",
+	{ PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
+	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
+	{ PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
 		&intel_830mp_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82840_HB, 0, 0, "i840", &intel_840_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82845_HB, 0, 0, "845G", &intel_845_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M",
+	{ PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
+	{ PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
+	{ PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
 		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854",
+	{ PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
+	{ PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
 		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM",
+	{ PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
+	{ PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
 		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82860_HB, 0, 0, "i860", &intel_860_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865",
+	{ PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
+	{ PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
 		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)",
+	{ PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
+	{ PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
 		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G",
+	{ PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
 		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM",
+	{ PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
 		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G",
+	{ PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
 		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM",
+	{ PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
 		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME",
+	{ PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
 		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ",
+	{ PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
 		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35",
+	{ PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
 		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q",
+	{ PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
 		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G",
+	{ PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
 		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM",
+	{ PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
 		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE",
+	{ PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
 		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33",
+	{ PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
+	{ PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
+	{ PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
 		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35",
+	{ PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
 		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
+	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
 		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150",
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
 		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150",
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
 		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
+	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
 	    "GM45", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0,
+	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
 	    "Eaglelake", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
+	{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
 	    "Q45/Q43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
+	{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
 	    "G45/G43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0,
+	{ PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
 	    "B43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
+	{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
 	    "G41", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
 	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
 	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
 	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
 	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0,
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
 	    "Sandybridge", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0,
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
 	    "Sandybridge", NULL, &intel_i965_driver },
-	{ 0, 0, 0, NULL, NULL, NULL }
+	{ 0, 0, NULL, NULL, NULL }
 };
 
+static int __devinit intel_gmch_probe(struct pci_dev *pdev,
+				      struct agp_bridge_data *bridge)
+{
+	int i;
+	bridge->driver = NULL;
+
+	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
+		if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
+			find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
+			bridge->driver =
+				intel_agp_chipsets[i].gmch_driver;
+			break;
+		}
+	}
+
+	if (!bridge->driver)
+		return 0;
+
+	bridge->dev_private_data = &intel_private;
+	bridge->dev = pdev;
+
+	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
+
+	if (bridge->driver->mask_memory == intel_i965_mask_memory) {
+		if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
+			dev_err(&intel_private.pcidev->dev,
+				"set gfx device dma mask 36bit failed!\n");
+		else
+			pci_set_consistent_dma_mask(intel_private.pcidev,
+						    DMA_BIT_MASK(36));
+	}
+
+	return 1;
+}
+
 static int __devinit agp_intel_probe(struct pci_dev *pdev,
 				     const struct pci_device_id *ent)
 {
@@ -2503,22 +871,18 @@
 	if (!bridge)
 		return -ENOMEM;
 
+	bridge->capndx = cap_ptr;
+
+	if (intel_gmch_probe(pdev, bridge))
+		goto found_gmch;
+
 	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
 		/* In case that multiple models of gfx chip may
 		   stand on same host bridge type, this can be
 		   sure we detect the right IGD. */
 		if (pdev->device == intel_agp_chipsets[i].chip_id) {
-			if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
-				find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
-				bridge->driver =
-					intel_agp_chipsets[i].gmch_driver;
-				break;
-			} else if (intel_agp_chipsets[i].multi_gmch_chip) {
-				continue;
-			} else {
-				bridge->driver = intel_agp_chipsets[i].driver;
-				break;
-			}
+			bridge->driver = intel_agp_chipsets[i].driver;
+			break;
 		}
 	}
 
@@ -2530,18 +894,16 @@
 		return -ENODEV;
 	}
 
-	if (bridge->driver == NULL) {
-		/* bridge has no AGP and no IGD detected */
+	if (!bridge->driver) {
 		if (cap_ptr)
 			dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
-				 intel_agp_chipsets[i].gmch_chip_id);
+			    	 intel_agp_chipsets[i].gmch_chip_id);
 		agp_put_bridge(bridge);
 		return -ENODEV;
 	}
 
 	bridge->dev = pdev;
-	bridge->capndx = cap_ptr;
-	bridge->dev_private_data = &intel_private;
+	bridge->dev_private_data = NULL;
 
 	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
 
@@ -2577,15 +939,7 @@
 				&bridge->mode);
 	}
 
-	if (bridge->driver->mask_memory == intel_i965_mask_memory) {
-		if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
-			dev_err(&intel_private.pcidev->dev,
-				"set gfx device dma mask 36bit failed!\n");
-		else
-			pci_set_consistent_dma_mask(intel_private.pcidev,
-						    DMA_BIT_MASK(36));
-	}
-
+found_gmch:
 	pci_set_drvdata(pdev, bridge);
 	err = agp_add_bridge(bridge);
 	if (!err)
@@ -2611,22 +965,7 @@
 	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
 	int ret_val;
 
-	if (bridge->driver == &intel_generic_driver)
-		intel_configure();
-	else if (bridge->driver == &intel_850_driver)
-		intel_850_configure();
-	else if (bridge->driver == &intel_845_driver)
-		intel_845_configure();
-	else if (bridge->driver == &intel_830mp_driver)
-		intel_830mp_configure();
-	else if (bridge->driver == &intel_915_driver)
-		intel_i915_configure();
-	else if (bridge->driver == &intel_830_driver)
-		intel_i830_configure();
-	else if (bridge->driver == &intel_810_driver)
-		intel_i810_configure();
-	else if (bridge->driver == &intel_i965_driver)
-		intel_i915_configure();
+	bridge->driver->configure();
 
 	ret_val = agp_rebind_memory();
 	if (ret_val != 0)
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
new file mode 100644
index 0000000..2547465
--- /dev/null
+++ b/drivers/char/agp/intel-agp.h
@@ -0,0 +1,239 @@
+/*
+ * Common Intel AGPGART and GTT definitions.
+ */
+
+/* Intel registers */
+#define INTEL_APSIZE	0xb4
+#define INTEL_ATTBASE	0xb8
+#define INTEL_AGPCTRL	0xb0
+#define INTEL_NBXCFG	0x50
+#define INTEL_ERRSTS	0x91
+
+/* Intel i830 registers */
+#define I830_GMCH_CTRL			0x52
+#define I830_GMCH_ENABLED		0x4
+#define I830_GMCH_MEM_MASK		0x1
+#define I830_GMCH_MEM_64M		0x1
+#define I830_GMCH_MEM_128M		0
+#define I830_GMCH_GMS_MASK		0x70
+#define I830_GMCH_GMS_DISABLED		0x00
+#define I830_GMCH_GMS_LOCAL		0x10
+#define I830_GMCH_GMS_STOLEN_512	0x20
+#define I830_GMCH_GMS_STOLEN_1024	0x30
+#define I830_GMCH_GMS_STOLEN_8192	0x40
+#define I830_RDRAM_CHANNEL_TYPE		0x03010
+#define I830_RDRAM_ND(x)		(((x) & 0x20) >> 5)
+#define I830_RDRAM_DDT(x)		(((x) & 0x18) >> 3)
+
+/* This one is for I830MP with an external graphics card */
+#define INTEL_I830_ERRSTS	0x92
+
+/* Intel 855GM/852GM registers */
+#define I855_GMCH_GMS_MASK		0xF0
+#define I855_GMCH_GMS_STOLEN_0M		0x0
+#define I855_GMCH_GMS_STOLEN_1M		(0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M		(0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M		(0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M	(0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M	(0x5 << 4)
+#define I85X_CAPID			0x44
+#define I85X_VARIANT_MASK		0x7
+#define I85X_VARIANT_SHIFT		5
+#define I855_GME			0x0
+#define I855_GM				0x4
+#define I852_GME			0x2
+#define I852_GM				0x5
+
+/* Intel i845 registers */
+#define INTEL_I845_AGPM		0x51
+#define INTEL_I845_ERRSTS	0xc8
+
+/* Intel i860 registers */
+#define INTEL_I860_MCHCFG	0x50
+#define INTEL_I860_ERRSTS	0xc8
+
+/* Intel i810 registers */
+#define I810_GMADDR		0x10
+#define I810_MMADDR		0x14
+#define I810_PTE_BASE		0x10000
+#define I810_PTE_MAIN_UNCACHED	0x00000000
+#define I810_PTE_LOCAL		0x00000002
+#define I810_PTE_VALID		0x00000001
+#define I830_PTE_SYSTEM_CACHED  0x00000006
+#define I810_SMRAM_MISCC	0x70
+#define I810_GFX_MEM_WIN_SIZE	0x00010000
+#define I810_GFX_MEM_WIN_32M	0x00010000
+#define I810_GMS		0x000000c0
+#define I810_GMS_DISABLE	0x00000000
+#define I810_PGETBL_CTL		0x2020
+#define I810_PGETBL_ENABLED	0x00000001
+#define I965_PGETBL_SIZE_MASK	0x0000000e
+#define I965_PGETBL_SIZE_512KB	(0 << 1)
+#define I965_PGETBL_SIZE_256KB	(1 << 1)
+#define I965_PGETBL_SIZE_128KB	(2 << 1)
+#define I965_PGETBL_SIZE_1MB	(3 << 1)
+#define I965_PGETBL_SIZE_2MB	(4 << 1)
+#define I965_PGETBL_SIZE_1_5MB	(5 << 1)
+#define G33_PGETBL_SIZE_MASK    (3 << 8)
+#define G33_PGETBL_SIZE_1M      (1 << 8)
+#define G33_PGETBL_SIZE_2M      (2 << 8)
+
+#define I810_DRAM_CTL		0x3000
+#define I810_DRAM_ROW_0		0x00000001
+#define I810_DRAM_ROW_0_SDRAM	0x00000001
+
+/* Intel 815 register */
+#define INTEL_815_APCONT	0x51
+#define INTEL_815_ATTBASE_MASK	~0x1FFFFFFF
+
+/* Intel i820 registers */
+#define INTEL_I820_RDCR		0x51
+#define INTEL_I820_ERRSTS	0xc8
+
+/* Intel i840 registers */
+#define INTEL_I840_MCHCFG	0x50
+#define INTEL_I840_ERRSTS	0xc8
+
+/* Intel i850 registers */
+#define INTEL_I850_MCHCFG	0x50
+#define INTEL_I850_ERRSTS	0xc8
+
+/* intel 915G registers */
+#define I915_GMADDR	0x18
+#define I915_MMADDR	0x10
+#define I915_PTEADDR	0x1C
+#define I915_GMCH_GMS_STOLEN_48M	(0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M	(0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M	(0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M	(0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M	(0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M	(0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M	(0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
+
+#define I915_IFPADDR    0x60
+
+/* Intel 965G registers */
+#define I965_MSAC 0x62
+#define I965_IFPADDR    0x70
+
+/* Intel 7505 registers */
+#define INTEL_I7505_APSIZE	0x74
+#define INTEL_I7505_NCAPID	0x60
+#define INTEL_I7505_NISTAT	0x6c
+#define INTEL_I7505_ATTBASE	0x78
+#define INTEL_I7505_ERRSTS	0x42
+#define INTEL_I7505_AGPCTRL	0x70
+#define INTEL_I7505_MCHCFG	0x50
+
+#define SNB_GMCH_CTRL	0x50
+#define SNB_GMCH_GMS_STOLEN_MASK	0xF8
+#define SNB_GMCH_GMS_STOLEN_32M		(1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M		(2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M		(3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M	(4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M	(5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M	(6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M	(7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M	(8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M	(9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M	(0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M	(0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M	(0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M	(0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M	(0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M	(0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M	(0x10 << 3)
+#define SNB_GTT_SIZE_0M			(0 << 8)
+#define SNB_GTT_SIZE_1M			(1 << 8)
+#define SNB_GTT_SIZE_2M			(2 << 8)
+#define SNB_GTT_SIZE_MASK		(3 << 8)
+
+/* PCI device IDs */
+#define PCI_DEVICE_ID_INTEL_E7221_HB	0x2588
+#define PCI_DEVICE_ID_INTEL_E7221_IG	0x258a
+#define PCI_DEVICE_ID_INTEL_82946GZ_HB      0x2970
+#define PCI_DEVICE_ID_INTEL_82946GZ_IG      0x2972
+#define PCI_DEVICE_ID_INTEL_82G35_HB     0x2980
+#define PCI_DEVICE_ID_INTEL_82G35_IG     0x2982
+#define PCI_DEVICE_ID_INTEL_82965Q_HB       0x2990
+#define PCI_DEVICE_ID_INTEL_82965Q_IG       0x2992
+#define PCI_DEVICE_ID_INTEL_82965G_HB       0x29A0
+#define PCI_DEVICE_ID_INTEL_82965G_IG       0x29A2
+#define PCI_DEVICE_ID_INTEL_82965GM_HB      0x2A00
+#define PCI_DEVICE_ID_INTEL_82965GM_IG      0x2A02
+#define PCI_DEVICE_ID_INTEL_82965GME_HB     0x2A10
+#define PCI_DEVICE_ID_INTEL_82965GME_IG     0x2A12
+#define PCI_DEVICE_ID_INTEL_82945GME_HB     0x27AC
+#define PCI_DEVICE_ID_INTEL_82945GME_IG     0x27AE
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB        0xA010
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG        0xA011
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB         0xA000
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG         0xA001
+#define PCI_DEVICE_ID_INTEL_G33_HB          0x29C0
+#define PCI_DEVICE_ID_INTEL_G33_IG          0x29C2
+#define PCI_DEVICE_ID_INTEL_Q35_HB          0x29B0
+#define PCI_DEVICE_ID_INTEL_Q35_IG          0x29B2
+#define PCI_DEVICE_ID_INTEL_Q33_HB          0x29D0
+#define PCI_DEVICE_ID_INTEL_Q33_IG          0x29D2
+#define PCI_DEVICE_ID_INTEL_B43_HB          0x2E40
+#define PCI_DEVICE_ID_INTEL_B43_IG          0x2E42
+#define PCI_DEVICE_ID_INTEL_GM45_HB         0x2A40
+#define PCI_DEVICE_ID_INTEL_GM45_IG         0x2A42
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB        0x2E00
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG        0x2E02
+#define PCI_DEVICE_ID_INTEL_Q45_HB          0x2E10
+#define PCI_DEVICE_ID_INTEL_Q45_IG          0x2E12
+#define PCI_DEVICE_ID_INTEL_G45_HB          0x2E20
+#define PCI_DEVICE_ID_INTEL_G45_IG          0x2E22
+#define PCI_DEVICE_ID_INTEL_G41_HB          0x2E30
+#define PCI_DEVICE_ID_INTEL_G41_IG          0x2E32
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB	    0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG	    0x0042
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB	    0x0044
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB	    0x0062
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB    0x006a
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG	    0x0046
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB  0x0100
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG  0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB  0x0104
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG  0x0106
+
+/* cover 915 and 945 variants */
+#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
+		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
+		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
+		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
+		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
+		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
+
+#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
+		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
+		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
+		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
+		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
+		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
+
+#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
+
+#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
+
+#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+
+#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
+		IS_SNB)
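The SNB_* and IS_* definitions above are consumed by the GTT code in the new intel-gtt.c below. As a rough illustration only (a sketch, not part of the patch, assuming the definitions above are in scope), the Sandybridge GTT-size field declared here would be decoded along these lines:

/* Decode the GTT size, in bytes, from the word read at SNB_GMCH_CTRL. */
static unsigned int snb_gtt_size_bytes(u16 snb_gmch_ctl)
{
	switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
	case SNB_GTT_SIZE_1M:
		return 1 * 1024 * 1024;
	case SNB_GTT_SIZE_2M:
		return 2 * 1024 * 1024;
	default:	/* SNB_GTT_SIZE_0M or a reserved encoding */
		return 0;
	}
}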
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
new file mode 100644
index 0000000..e8ea682
--- /dev/null
+++ b/drivers/char/agp/intel-gtt.c
@@ -0,0 +1,1516 @@
+/*
+ * Intel GTT (Graphics Translation Table) routines
+ *
+ * Caveat: This driver implements the linux agp interface, but this is far from
+ * an agp driver! GTT support ended up here for purely historical reasons: The
+ * old userspace intel graphics drivers needed an interface to map memory into
+ * the GTT. And the drm provides a default interface for graphics devices sitting
+ * on an agp port. So it made sense to fake the GTT support as an agp port to
+ * avoid having to create a new api.
+ *
+ * With gem this does not make much sense anymore, it just needlessly complicates
+ * the code. But as long as the old graphics stack is still supported, it's stuck
+ * here.
+ *
+ * /fairy-tale-mode off
+ */
+
+/*
+ * If we have Intel graphics, we're not going to have anything other than
+ * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
+ * on the Intel IOMMU support (CONFIG_DMAR).
+ * Only newer chipsets need to bother with this, of course.
+ */
+#ifdef CONFIG_DMAR
+#define USE_PCI_DMA_API 1
+#endif
+
+static const struct aper_size_info_fixed intel_i810_sizes[] =
+{
+	{64, 16384, 4},
+	/* The 32M mode still requires a 64k gatt */
+	{32, 8192, 4}
+};
+
+#define AGP_DCACHE_MEMORY	1
+#define AGP_PHYS_MEMORY		2
+#define INTEL_AGP_CACHED_MEMORY 3
+
+static struct gatt_mask intel_i810_masks[] =
+{
+	{.mask = I810_PTE_VALID, .type = 0},
+	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
+	{.mask = I810_PTE_VALID, .type = 0},
+	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
+	 .type = INTEL_AGP_CACHED_MEMORY}
+};
+
+static struct _intel_private {
+	struct pci_dev *pcidev;	/* device one */
+	u8 __iomem *registers;
+	u32 __iomem *gtt;		/* I915G */
+	int num_dcache_entries;
+	/* gtt_entries is the number of gtt entries that are already mapped
+	 * to stolen memory.  Stolen memory is larger than the memory mapped
+	 * through gtt_entries, as it includes some reserved space for the BIOS
+	 * popup and for the GTT.
+	 */
+	int gtt_entries;			/* i830+ */
+	int gtt_total_size;
+	union {
+		void __iomem *i9xx_flush_page;
+		void *i8xx_flush_page;
+	};
+	struct page *i8xx_page;
+	struct resource ifp_resource;
+	int resource_valid;
+} intel_private;
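The comment embedded in the struct is the central invariant for the code that follows; schematically (illustrative only):

	index 0 ................ gtt_entries ................ gtt_total_size
	[ PTEs covering stolen memory ][ PTEs available to insert_entries() ]

Entries below gtt_entries are never handed out, which is why both the insert and remove paths below reject any pg_start < intel_private.gtt_entries.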
+
+#ifdef USE_PCI_DMA_API
+static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
+{
+	*ret = pci_map_page(intel_private.pcidev, page, 0,
+			    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(intel_private.pcidev, *ret))
+		return -EINVAL;
+	return 0;
+}
+
+static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
+{
+	pci_unmap_page(intel_private.pcidev, dma,
+		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
+static void intel_agp_free_sglist(struct agp_memory *mem)
+{
+	struct sg_table st;
+
+	st.sgl = mem->sg_list;
+	st.orig_nents = st.nents = mem->page_count;
+
+	sg_free_table(&st);
+
+	mem->sg_list = NULL;
+	mem->num_sg = 0;
+}
+
+static int intel_agp_map_memory(struct agp_memory *mem)
+{
+	struct sg_table st;
+	struct scatterlist *sg;
+	int i;
+
+	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+
+	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+		return -ENOMEM;
+
+	mem->sg_list = sg = st.sgl;
+
+	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
+		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
+
+	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
+				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
+	if (unlikely(!mem->num_sg)) {
+		intel_agp_free_sglist(mem);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void intel_agp_unmap_memory(struct agp_memory *mem)
+{
+	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
+
+	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
+		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
+	intel_agp_free_sglist(mem);
+}
+
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+					off_t pg_start, int mask_type)
+{
+	struct scatterlist *sg;
+	int i, j;
+
+	j = pg_start;
+
+	WARN_ON(!mem->num_sg);
+
+	if (mem->num_sg == mem->page_count) {
+		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
+			writel(agp_bridge->driver->mask_memory(agp_bridge,
+					sg_dma_address(sg), mask_type),
+					intel_private.gtt+j);
+			j++;
+		}
+	} else {
+		/* sg may merge pages, but we have to separate
+		 * per-page addr for GTT */
+		unsigned int len, m;
+
+		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
+			len = sg_dma_len(sg) / PAGE_SIZE;
+			for (m = 0; m < len; m++) {
+				writel(agp_bridge->driver->mask_memory(agp_bridge,
+								       sg_dma_address(sg) + m * PAGE_SIZE,
+								       mask_type),
+				       intel_private.gtt+j);
+				j++;
+			}
+		}
+	}
+	readl(intel_private.gtt+j-1);
+}
+
+#else
+
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+					off_t pg_start, int mask_type)
+{
+	int i, j;
+	u32 cache_bits = 0;
+
+	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+	{
+		cache_bits = I830_PTE_SYSTEM_CACHED;
+	}
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		writel(agp_bridge->driver->mask_memory(agp_bridge,
+				page_to_phys(mem->pages[i]), mask_type),
+		       intel_private.gtt+j);
+	}
+
+	readl(intel_private.gtt+j-1);
+}
+
+#endif
+
+static int intel_i810_fetch_size(void)
+{
+	u32 smram_miscc;
+	struct aper_size_info_fixed *values;
+
+	pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
+	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
+		dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
+		return 0;
+	}
+	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
+		agp_bridge->current_size = (void *) (values + 1);
+		agp_bridge->aperture_size_idx = 1;
+		return values[1].size;
+	} else {
+		agp_bridge->current_size = (void *) (values);
+		agp_bridge->aperture_size_idx = 0;
+		return values[0].size;
+	}
+
+	return 0;
+}
+
+static int intel_i810_configure(void)
+{
+	struct aper_size_info_fixed *current_size;
+	u32 temp;
+	int i;
+
+	current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+	if (!intel_private.registers) {
+		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
+		temp &= 0xfff80000;
+
+		intel_private.registers = ioremap(temp, 128 * 4096);
+		if (!intel_private.registers) {
+			dev_err(&intel_private.pcidev->dev,
+				"can't remap memory\n");
+			return -ENOMEM;
+		}
+	}
+
+	if ((readl(intel_private.registers+I810_DRAM_CTL)
+		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+		/* This will need to be dynamically assigned */
+		dev_info(&intel_private.pcidev->dev,
+			 "detected 4MB dedicated video ram\n");
+		intel_private.num_dcache_entries = 1024;
+	}
+	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
+	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
+
+	if (agp_bridge->driver->needs_scratch_page) {
+		for (i = 0; i < current_size->num_entries; i++) {
+			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+		}
+		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting. */
+	}
+	global_cache_flush();
+	return 0;
+}
+
+static void intel_i810_cleanup(void)
+{
+	writel(0, intel_private.registers+I810_PGETBL_CTL);
+	readl(intel_private.registers);	/* PCI Posting. */
+	iounmap(intel_private.registers);
+}
+
+static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+	return;
+}
+
+/* Exists to support ARGB cursors */
+static struct page *i8xx_alloc_pages(void)
+{
+	struct page *page;
+
+	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
+	if (page == NULL)
+		return NULL;
+
+	if (set_pages_uc(page, 4) < 0) {
+		set_pages_wb(page, 4);
+		__free_pages(page, 2);
+		return NULL;
+	}
+	get_page(page);
+	atomic_inc(&agp_bridge->current_memory_agp);
+	return page;
+}
+
+static void i8xx_destroy_pages(struct page *page)
+{
+	if (page == NULL)
+		return;
+
+	set_pages_wb(page, 4);
+	put_page(page);
+	__free_pages(page, 2);
+	atomic_dec(&agp_bridge->current_memory_agp);
+}
+
+static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
+					int type)
+{
+	if (type < AGP_USER_TYPES)
+		return type;
+	else if (type == AGP_USER_CACHED_MEMORY)
+		return INTEL_AGP_CACHED_MEMORY;
+	else
+		return 0;
+}
+
+static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
+				int type)
+{
+	int i, j, num_entries;
+	void *temp;
+	int ret = -EINVAL;
+	int mask_type;
+
+	if (mem->page_count == 0)
+		goto out;
+
+	temp = agp_bridge->current_size;
+	num_entries = A_SIZE_FIX(temp)->num_entries;
+
+	if ((pg_start + mem->page_count) > num_entries)
+		goto out_err;
+
+
+	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
+		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
+			ret = -EBUSY;
+			goto out_err;
+		}
+	}
+
+	if (type != mem->type)
+		goto out_err;
+
+	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
+	switch (mask_type) {
+	case AGP_DCACHE_MEMORY:
+		if (!mem->is_flushed)
+			global_cache_flush();
+		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
+			       intel_private.registers+I810_PTE_BASE+(i*4));
+		}
+		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+		break;
+	case AGP_PHYS_MEMORY:
+	case AGP_NORMAL_MEMORY:
+		if (!mem->is_flushed)
+			global_cache_flush();
+		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+			writel(agp_bridge->driver->mask_memory(agp_bridge,
+					page_to_phys(mem->pages[i]), mask_type),
+			       intel_private.registers+I810_PTE_BASE+(j*4));
+		}
+		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
+		break;
+	default:
+		goto out_err;
+	}
+
+out:
+	ret = 0;
+out_err:
+	mem->is_flushed = true;
+	return ret;
+}
+
+static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
+				int type)
+{
+	int i;
+
+	if (mem->page_count == 0)
+		return 0;
+
+	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+	}
+	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+
+	return 0;
+}
+
+/*
+ * The i810/i830 requires a physical address to program its mouse
+ * pointer into hardware.
+ * However, the Xserver still writes to it through the agp aperture.
+ */
+static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
+{
+	struct agp_memory *new;
+	struct page *page;
+
+	switch (pg_count) {
+	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
+		break;
+	case 4:
+		/* kludge to get 4 physical pages for ARGB cursor */
+		page = i8xx_alloc_pages();
+		break;
+	default:
+		return NULL;
+	}
+
+	if (page == NULL)
+		return NULL;
+
+	new = agp_create_memory(pg_count);
+	if (new == NULL)
+		return NULL;
+
+	new->pages[0] = page;
+	if (pg_count == 4) {
+		/* kludge to get 4 physical pages for ARGB cursor */
+		new->pages[1] = new->pages[0] + 1;
+		new->pages[2] = new->pages[1] + 1;
+		new->pages[3] = new->pages[2] + 1;
+	}
+	new->page_count = pg_count;
+	new->num_scratch_pages = pg_count;
+	new->type = AGP_PHYS_MEMORY;
+	new->physical = page_to_phys(new->pages[0]);
+	return new;
+}
+
+static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
+{
+	struct agp_memory *new;
+
+	if (type == AGP_DCACHE_MEMORY) {
+		if (pg_count != intel_private.num_dcache_entries)
+			return NULL;
+
+		new = agp_create_memory(1);
+		if (new == NULL)
+			return NULL;
+
+		new->type = AGP_DCACHE_MEMORY;
+		new->page_count = pg_count;
+		new->num_scratch_pages = 0;
+		agp_free_page_array(new);
+		return new;
+	}
+	if (type == AGP_PHYS_MEMORY)
+		return alloc_agpphysmem_i8xx(pg_count, type);
+	return NULL;
+}
+
+static void intel_i810_free_by_type(struct agp_memory *curr)
+{
+	agp_free_key(curr->key);
+	if (curr->type == AGP_PHYS_MEMORY) {
+		if (curr->page_count == 4)
+			i8xx_destroy_pages(curr->pages[0]);
+		else {
+			agp_bridge->driver->agp_destroy_page(curr->pages[0],
+							     AGP_PAGE_DESTROY_UNMAP);
+			agp_bridge->driver->agp_destroy_page(curr->pages[0],
+							     AGP_PAGE_DESTROY_FREE);
+		}
+		agp_free_page_array(curr);
+	}
+	kfree(curr);
+}
+
+static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
+					    dma_addr_t addr, int type)
+{
+	/* Type checking must be done elsewhere */
+	return addr | bridge->driver->masks[type].mask;
+}
+
+static struct aper_size_info_fixed intel_i830_sizes[] =
+{
+	{128, 32768, 5},
+	/* The 64M mode still requires a 128k gatt */
+	{64, 16384, 5},
+	{256, 65536, 6},
+	{512, 131072, 7},
+};
+
+static void intel_i830_init_gtt_entries(void)
+{
+	u16 gmch_ctrl;
+	int gtt_entries = 0;
+	u8 rdct;
+	int local = 0;
+	static const int ddt[4] = { 0, 16, 32, 64 };
+	int size; /* reserved space (in kb) at the top of stolen memory */
+
+	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+
+	if (IS_I965) {
+		u32 pgetbl_ctl;
+		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+		/* The 965 has a field telling us the size of the GTT,
+		 * which may be larger than what is necessary to map the
+		 * aperture.
+		 */
+		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+		case I965_PGETBL_SIZE_128KB:
+			size = 128;
+			break;
+		case I965_PGETBL_SIZE_256KB:
+			size = 256;
+			break;
+		case I965_PGETBL_SIZE_512KB:
+			size = 512;
+			break;
+		case I965_PGETBL_SIZE_1MB:
+			size = 1024;
+			break;
+		case I965_PGETBL_SIZE_2MB:
+			size = 2048;
+			break;
+		case I965_PGETBL_SIZE_1_5MB:
+			size = 1024 + 512;
+			break;
+		default:
+			dev_info(&intel_private.pcidev->dev,
+				 "unknown page table size, assuming 512KB\n");
+			size = 512;
+		}
+		size += 4; /* add in BIOS popup space */
+	} else if (IS_G33 && !IS_PINEVIEW) {
+		/* G33's GTT size is defined in gmch_ctrl */
+		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
+		case G33_PGETBL_SIZE_1M:
+			size = 1024;
+			break;
+		case G33_PGETBL_SIZE_2M:
+			size = 2048;
+			break;
+		default:
+			dev_info(&agp_bridge->dev->dev,
+				 "unknown page table size 0x%x, assuming 512KB\n",
+				(gmch_ctrl & G33_PGETBL_SIZE_MASK));
+			size = 512;
+		}
+		size += 4;
+	} else if (IS_G4X || IS_PINEVIEW) {
+		/* On 4 series hardware, GTT stolen is separate from graphics
+		 * stolen, so ignore it when counting stolen gtt entries.  However,
+		 * 4KB of the stolen memory doesn't get mapped to the GTT.
+		 */
+		size = 4;
+	} else {
+		/* On previous hardware, the GTT size was just what was
+		 * required to map the aperture.
+		 */
+		size = agp_bridge->driver->fetch_size() + 4;
+	}
+
+	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+		case I830_GMCH_GMS_STOLEN_512:
+			gtt_entries = KB(512) - KB(size);
+			break;
+		case I830_GMCH_GMS_STOLEN_1024:
+			gtt_entries = MB(1) - KB(size);
+			break;
+		case I830_GMCH_GMS_STOLEN_8192:
+			gtt_entries = MB(8) - KB(size);
+			break;
+		case I830_GMCH_GMS_LOCAL:
+			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
+			gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
+					MB(ddt[I830_RDRAM_DDT(rdct)]);
+			local = 1;
+			break;
+		default:
+			gtt_entries = 0;
+			break;
+		}
+	} else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+		   agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
+		/*
+		 * SandyBridge has new memory control reg at 0x50.w
+		 */
+		u16 snb_gmch_ctl;
+		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
+		case SNB_GMCH_GMS_STOLEN_32M:
+			gtt_entries = MB(32) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_64M:
+			gtt_entries = MB(64) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_96M:
+			gtt_entries = MB(96) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_128M:
+			gtt_entries = MB(128) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_160M:
+			gtt_entries = MB(160) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_192M:
+			gtt_entries = MB(192) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_224M:
+			gtt_entries = MB(224) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_256M:
+			gtt_entries = MB(256) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_288M:
+			gtt_entries = MB(288) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_320M:
+			gtt_entries = MB(320) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_352M:
+			gtt_entries = MB(352) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_384M:
+			gtt_entries = MB(384) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_416M:
+			gtt_entries = MB(416) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_448M:
+			gtt_entries = MB(448) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_480M:
+			gtt_entries = MB(480) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_512M:
+			gtt_entries = MB(512) - KB(size);
+			break;
+		}
+	} else {
+		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
+		case I855_GMCH_GMS_STOLEN_1M:
+			gtt_entries = MB(1) - KB(size);
+			break;
+		case I855_GMCH_GMS_STOLEN_4M:
+			gtt_entries = MB(4) - KB(size);
+			break;
+		case I855_GMCH_GMS_STOLEN_8M:
+			gtt_entries = MB(8) - KB(size);
+			break;
+		case I855_GMCH_GMS_STOLEN_16M:
+			gtt_entries = MB(16) - KB(size);
+			break;
+		case I855_GMCH_GMS_STOLEN_32M:
+			gtt_entries = MB(32) - KB(size);
+			break;
+		case I915_GMCH_GMS_STOLEN_48M:
+			/* Check it's really I915G */
+			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
+				gtt_entries = MB(48) - KB(size);
+			else
+				gtt_entries = 0;
+			break;
+		case I915_GMCH_GMS_STOLEN_64M:
+			/* Check it's really I915G */
+			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
+				gtt_entries = MB(64) - KB(size);
+			else
+				gtt_entries = 0;
+			break;
+		case G33_GMCH_GMS_STOLEN_128M:
+			if (IS_G33 || IS_I965 || IS_G4X)
+				gtt_entries = MB(128) - KB(size);
+			else
+				gtt_entries = 0;
+			break;
+		case G33_GMCH_GMS_STOLEN_256M:
+			if (IS_G33 || IS_I965 || IS_G4X)
+				gtt_entries = MB(256) - KB(size);
+			else
+				gtt_entries = 0;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_96M:
+			if (IS_I965 || IS_G4X)
+				gtt_entries = MB(96) - KB(size);
+			else
+				gtt_entries = 0;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_160M:
+			if (IS_I965 || IS_G4X)
+				gtt_entries = MB(160) - KB(size);
+			else
+				gtt_entries = 0;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_224M:
+			if (IS_I965 || IS_G4X)
+				gtt_entries = MB(224) - KB(size);
+			else
+				gtt_entries = 0;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_352M:
+			if (IS_I965 || IS_G4X)
+				gtt_entries = MB(352) - KB(size);
+			else
+				gtt_entries = 0;
+			break;
+		default:
+			gtt_entries = 0;
+			break;
+		}
+	}
+	if (gtt_entries > 0) {
+		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
+		       gtt_entries / KB(1), local ? "local" : "stolen");
+		gtt_entries /= KB(4);
+	} else {
+		dev_info(&agp_bridge->dev->dev,
+		       "no pre-allocated video memory detected\n");
+		gtt_entries = 0;
+	}
+
+	intel_private.gtt_entries = gtt_entries;
+}
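To make the units in this function concrete: KB(x) and MB(x) expand to byte counts, size is kept in KB, and the final division by KB(4) converts bytes into 4KB GTT entries. As a worked example (numbers chosen purely for illustration), an 855-class part with a 128MB aperture has size = fetch_size() + 4 = 132, so an I855_GMCH_GMS_STOLEN_8M report yields

	gtt_entries = MB(8) - KB(132) = 8388608 - 135168 = 8253440 bytes
	            = 8060K in the dev_info() message above
	            = 8253440 / KB(4) = 2015 PTEs already mapping stolen memory.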
+
+static void intel_i830_fini_flush(void)
+{
+	kunmap(intel_private.i8xx_page);
+	intel_private.i8xx_flush_page = NULL;
+	unmap_page_from_agp(intel_private.i8xx_page);
+
+	__free_page(intel_private.i8xx_page);
+	intel_private.i8xx_page = NULL;
+}
+
+static void intel_i830_setup_flush(void)
+{
+	/* return if we've already set the flush mechanism up */
+	if (intel_private.i8xx_page)
+		return;
+
+	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+	if (!intel_private.i8xx_page)
+		return;
+
+	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
+	if (!intel_private.i8xx_flush_page)
+		intel_i830_fini_flush();
+}
+
+/* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU all the way out to main memory, because the GPU
+ * doesn't snoop those buffers.
+ *
+ * The 8xx series doesn't have the same lovely interface for flushing the
+ * chipset write buffers that the later chips do. According to the 865
+ * specs, it's 64 octwords, or 1KB.  So, to get those previous things in
+ * that buffer out, we just fill 1KB and clflush it out, on the assumption
+ * that it'll push whatever was in there out.  It appears to work.
+ */
+static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+{
+	unsigned int *pg = intel_private.i8xx_flush_page;
+
+	memset(pg, 0, 1024);
+
+	if (cpu_has_clflush)
+		clflush_cache_range(pg, 1024);
+	else if (wbinvd_on_all_cpus() != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+}
+
+/* The intel i830 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for it in the GTT.
+ */
+static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	int page_order;
+	struct aper_size_info_fixed *size;
+	int num_entries;
+	u32 temp;
+
+	size = agp_bridge->current_size;
+	page_order = size->page_order;
+	num_entries = size->num_entries;
+	agp_bridge->gatt_table_real = NULL;
+
+	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
+	temp &= 0xfff80000;
+
+	intel_private.registers = ioremap(temp, 128 * 4096);
+	if (!intel_private.registers)
+		return -ENOMEM;
+
+	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+	global_cache_flush();	/* FIXME: ?? */
+
+	/* we have to call this as early as possible after the MMIO base address is known */
+	intel_i830_init_gtt_entries();
+
+	agp_bridge->gatt_table = NULL;
+
+	agp_bridge->gatt_bus_addr = temp;
+
+	return 0;
+}
+
+/* Return the gatt table to a sane state. Use the top of stolen
+ * memory for the GTT.
+ */
+static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
+{
+	return 0;
+}
+
+static int intel_i830_fetch_size(void)
+{
+	u16 gmch_ctrl;
+	struct aper_size_info_fixed *values;
+
+	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+	if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
+	    agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
+		/* 855GM/852GM/865G has 128MB aperture size */
+		agp_bridge->current_size = (void *) values;
+		agp_bridge->aperture_size_idx = 0;
+		return values[0].size;
+	}
+
+	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+
+	if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
+		agp_bridge->current_size = (void *) values;
+		agp_bridge->aperture_size_idx = 0;
+		return values[0].size;
+	} else {
+		agp_bridge->current_size = (void *) (values + 1);
+		agp_bridge->aperture_size_idx = 1;
+		return values[1].size;
+	}
+
+	return 0;
+}
+
+static int intel_i830_configure(void)
+{
+	struct aper_size_info_fixed *current_size;
+	u32 temp;
+	u16 gmch_ctrl;
+	int i;
+
+	current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
+	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+	gmch_ctrl |= I830_GMCH_ENABLED;
+	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+
+	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
+
+	if (agp_bridge->driver->needs_scratch_page) {
+		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
+			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+		}
+		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI Posting. */
+	}
+
+	global_cache_flush();
+
+	intel_i830_setup_flush();
+	return 0;
+}
+
+static void intel_i830_cleanup(void)
+{
+	iounmap(intel_private.registers);
+}
+
+static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
+				     int type)
+{
+	int i, j, num_entries;
+	void *temp;
+	int ret = -EINVAL;
+	int mask_type;
+
+	if (mem->page_count == 0)
+		goto out;
+
+	temp = agp_bridge->current_size;
+	num_entries = A_SIZE_FIX(temp)->num_entries;
+
+	if (pg_start < intel_private.gtt_entries) {
+		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+			   pg_start, intel_private.gtt_entries);
+
+		dev_info(&intel_private.pcidev->dev,
+			 "trying to insert into local/stolen memory\n");
+		goto out_err;
+	}
+
+	if ((pg_start + mem->page_count) > num_entries)
+		goto out_err;
+
+	/* The i830 can't check the GTT for entries since it's read only;
+	 * depend on the caller to make the correct offset decisions.
+	 */
+
+	if (type != mem->type)
+		goto out_err;
+
+	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
+	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+	    mask_type != INTEL_AGP_CACHED_MEMORY)
+		goto out_err;
+
+	if (!mem->is_flushed)
+		global_cache_flush();
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		writel(agp_bridge->driver->mask_memory(agp_bridge,
+				page_to_phys(mem->pages[i]), mask_type),
+		       intel_private.registers+I810_PTE_BASE+(j*4));
+	}
+	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
+
+out:
+	ret = 0;
+out_err:
+	mem->is_flushed = true;
+	return ret;
+}
+
+static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
+				     int type)
+{
+	int i;
+
+	if (mem->page_count == 0)
+		return 0;
+
+	if (pg_start < intel_private.gtt_entries) {
+		dev_info(&intel_private.pcidev->dev,
+			 "trying to disable local/stolen memory\n");
+		return -EINVAL;
+	}
+
+	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+	}
+	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+
+	return 0;
+}
+
+static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
+{
+	if (type == AGP_PHYS_MEMORY)
+		return alloc_agpphysmem_i8xx(pg_count, type);
+	/* always return NULL for other allocation types for now */
+	return NULL;
+}
+
+static int intel_alloc_chipset_flush_resource(void)
+{
+	int ret;
+	ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
+				     pcibios_align_resource, agp_bridge->dev);
+
+	return ret;
+}
+
+static void intel_i915_setup_chipset_flush(void)
+{
+	int ret;
+	u32 temp;
+
+	pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
+	if (!(temp & 0x1)) {
+		intel_alloc_chipset_flush_resource();
+		intel_private.resource_valid = 1;
+		pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+	} else {
+		temp &= ~1;
+
+		intel_private.resource_valid = 1;
+		intel_private.ifp_resource.start = temp;
+		intel_private.ifp_resource.end = temp + PAGE_SIZE;
+		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+		/* some BIOSes reserve this area in a PnP region, some don't */
+		if (ret)
+			intel_private.resource_valid = 0;
+	}
+}
+
+static void intel_i965_g33_setup_chipset_flush(void)
+{
+	u32 temp_hi, temp_lo;
+	int ret;
+
+	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
+	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
+
+	if (!(temp_lo & 0x1)) {
+
+		intel_alloc_chipset_flush_resource();
+
+		intel_private.resource_valid = 1;
+		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
+			upper_32_bits(intel_private.ifp_resource.start));
+		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+	} else {
+		u64 l64;
+
+		temp_lo &= ~0x1;
+		l64 = ((u64)temp_hi << 32) | temp_lo;
+
+		intel_private.resource_valid = 1;
+		intel_private.ifp_resource.start = l64;
+		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
+		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+		/* some BIOSes reserve this area in a PnP region, some don't */
+		if (ret)
+			intel_private.resource_valid = 0;
+	}
+}
+
+static void intel_i9xx_setup_flush(void)
+{
+	/* return if already configured */
+	if (intel_private.ifp_resource.start)
+		return;
+
+	if (IS_SNB)
+		return;
+
+	/* setup a resource for this object */
+	intel_private.ifp_resource.name = "Intel Flush Page";
+	intel_private.ifp_resource.flags = IORESOURCE_MEM;
+
+	/* Setup chipset flush for 915 */
+	if (IS_I965 || IS_G33 || IS_G4X) {
+		intel_i965_g33_setup_chipset_flush();
+	} else {
+		intel_i915_setup_chipset_flush();
+	}
+
+	if (intel_private.ifp_resource.start) {
+		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
+		if (!intel_private.i9xx_flush_page)
+			dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
+	}
+}
+
+static int intel_i915_configure(void)
+{
+	struct aper_size_info_fixed *current_size;
+	u32 temp;
+	u16 gmch_ctrl;
+	int i;
+
+	current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+	pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
+
+	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+	gmch_ctrl |= I830_GMCH_ENABLED;
+	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+
+	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
+
+	if (agp_bridge->driver->needs_scratch_page) {
+		for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
+			writel(agp_bridge->scratch_page, intel_private.gtt+i);
+		}
+		readl(intel_private.gtt+i-1);	/* PCI Posting. */
+	}
+
+	global_cache_flush();
+
+	intel_i9xx_setup_flush();
+
+	return 0;
+}
+
+static void intel_i915_cleanup(void)
+{
+	if (intel_private.i9xx_flush_page)
+		iounmap(intel_private.i9xx_flush_page);
+	if (intel_private.resource_valid)
+		release_resource(&intel_private.ifp_resource);
+	intel_private.ifp_resource.start = 0;
+	intel_private.resource_valid = 0;
+	iounmap(intel_private.gtt);
+	iounmap(intel_private.registers);
+}
+
+static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
+{
+	if (intel_private.i9xx_flush_page)
+		writel(1, intel_private.i9xx_flush_page);
+}
+
+static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
+				     int type)
+{
+	int num_entries;
+	void *temp;
+	int ret = -EINVAL;
+	int mask_type;
+
+	if (mem->page_count == 0)
+		goto out;
+
+	temp = agp_bridge->current_size;
+	num_entries = A_SIZE_FIX(temp)->num_entries;
+
+	if (pg_start < intel_private.gtt_entries) {
+		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+			   pg_start, intel_private.gtt_entries);
+
+		dev_info(&intel_private.pcidev->dev,
+			 "trying to insert into local/stolen memory\n");
+		goto out_err;
+	}
+
+	if ((pg_start + mem->page_count) > num_entries)
+		goto out_err;
+
+	/* The i915 can't check the GTT for entries since it's read only;
+	 * depend on the caller to make the correct offset decisions.
+	 */
+
+	if (type != mem->type)
+		goto out_err;
+
+	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
+	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+	    mask_type != INTEL_AGP_CACHED_MEMORY)
+		goto out_err;
+
+	if (!mem->is_flushed)
+		global_cache_flush();
+
+	intel_agp_insert_sg_entries(mem, pg_start, mask_type);
+
+ out:
+	ret = 0;
+ out_err:
+	mem->is_flushed = true;
+	return ret;
+}
+
+static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
+				     int type)
+{
+	int i;
+
+	if (mem->page_count == 0)
+		return 0;
+
+	if (pg_start < intel_private.gtt_entries) {
+		dev_info(&intel_private.pcidev->dev,
+			 "trying to disable local/stolen memory\n");
+		return -EINVAL;
+	}
+
+	for (i = pg_start; i < (mem->page_count + pg_start); i++)
+		writel(agp_bridge->scratch_page, intel_private.gtt+i);
+
+	readl(intel_private.gtt+i-1);
+
+	return 0;
+}
+
+/* Return the aperture size by just checking the resource length.  The effect
+ * described in the spec of the MSAC registers is just a change of the
+ * resource size.
+ */
+static int intel_i9xx_fetch_size(void)
+{
+	int num_sizes = ARRAY_SIZE(intel_i830_sizes);
+	int aper_size; /* size in megabytes */
+	int i;
+
+	aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
+
+	for (i = 0; i < num_sizes; i++) {
+		if (aper_size == intel_i830_sizes[i].size) {
+			agp_bridge->current_size = intel_i830_sizes + i;
+			return aper_size;
+		}
+	}
+
+	return 0;
+}
+
+/* The intel i915 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for it in the GTT.
+ */
+static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	int page_order;
+	struct aper_size_info_fixed *size;
+	int num_entries;
+	u32 temp, temp2;
+	int gtt_map_size = 256 * 1024;
+
+	size = agp_bridge->current_size;
+	page_order = size->page_order;
+	num_entries = size->num_entries;
+	agp_bridge->gatt_table_real = NULL;
+
+	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
+	pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
+
+	if (IS_G33)
+	    gtt_map_size = 1024 * 1024; /* 1M on G33 */
+	intel_private.gtt = ioremap(temp2, gtt_map_size);
+	if (!intel_private.gtt)
+		return -ENOMEM;
+
+	intel_private.gtt_total_size = gtt_map_size / 4;
+
+	temp &= 0xfff80000;
+
+	intel_private.registers = ioremap(temp, 128 * 4096);
+	if (!intel_private.registers) {
+		iounmap(intel_private.gtt);
+		return -ENOMEM;
+	}
+
+	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+	global_cache_flush();	/* FIXME: ? */
+
+	/* we have to call this as early as possible after the MMIO base address is known */
+	intel_i830_init_gtt_entries();
+
+	agp_bridge->gatt_table = NULL;
+
+	agp_bridge->gatt_bus_addr = temp;
+
+	return 0;
+}
+
+/*
+ * The i965 supports 36-bit physical addresses, but to keep
+ * the format of the GTT the same, the bits that don't fit
+ * in a 32-bit word are shifted down to bits 4..7.
+ *
+ * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
+ * is always zero on 32-bit architectures, so no need to make
+ * this conditional.
+ */
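+/* For example, a physical address above 4GB such as 0x340000000 gives
+ * (addr >> 28) & 0xf0 == 0x30, so the low 32 bits of the entry become
+ * 0x40000030 before the driver's type mask is ORed in.
+ */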
+static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
+					    dma_addr_t addr, int type)
+{
+	/* Shift high bits down */
+	addr |= (addr >> 28) & 0xf0;
+
+	/* Type checking must be done elsewhere */
+	return addr | bridge->driver->masks[type].mask;
+}
+
+static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
+{
+	u16 snb_gmch_ctl;
+
+	switch (agp_bridge->dev->device) {
+	case PCI_DEVICE_ID_INTEL_GM45_HB:
+	case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
+	case PCI_DEVICE_ID_INTEL_Q45_HB:
+	case PCI_DEVICE_ID_INTEL_G45_HB:
+	case PCI_DEVICE_ID_INTEL_G41_HB:
+	case PCI_DEVICE_ID_INTEL_B43_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
+		*gtt_offset = *gtt_size = MB(2);
+		break;
+	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
+	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
+		*gtt_offset = MB(2);
+
+		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
+		default:
+		case SNB_GTT_SIZE_0M:
+			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
+			*gtt_size = MB(0);
+			break;
+		case SNB_GTT_SIZE_1M:
+			*gtt_size = MB(1);
+			break;
+		case SNB_GTT_SIZE_2M:
+			*gtt_size = MB(2);
+			break;
+		}
+		break;
+	default:
+		*gtt_offset = *gtt_size = KB(512);
+	}
+}
+
+/* The intel i965 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for it in the GTT.
+ */
+static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	int page_order;
+	struct aper_size_info_fixed *size;
+	int num_entries;
+	u32 temp;
+	int gtt_offset, gtt_size;
+
+	size = agp_bridge->current_size;
+	page_order = size->page_order;
+	num_entries = size->num_entries;
+	agp_bridge->gatt_table_real = NULL;
+
+	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
+
+	temp &= 0xfff00000;
+
+	intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
+
+	intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
+
+	if (!intel_private.gtt)
+		return -ENOMEM;
+
+	intel_private.gtt_total_size = gtt_size / 4;
+
+	intel_private.registers = ioremap(temp, 128 * 4096);
+	if (!intel_private.registers) {
+		iounmap(intel_private.gtt);
+		return -ENOMEM;
+	}
+
+	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+	global_cache_flush();   /* FIXME: ? */
+
+	/* we have to call this as early as possible after the MMIO base address is known */
+	intel_i830_init_gtt_entries();
+
+	agp_bridge->gatt_table = NULL;
+
+	agp_bridge->gatt_bus_addr = temp;
+
+	return 0;
+}
+
+static const struct agp_bridge_driver intel_810_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_i810_sizes,
+	.size_type		= FIXED_APER_SIZE,
+	.num_aperture_sizes	= 2,
+	.needs_scratch_page	= true,
+	.configure		= intel_i810_configure,
+	.fetch_size		= intel_i810_fetch_size,
+	.cleanup		= intel_i810_cleanup,
+	.mask_memory		= intel_i810_mask_memory,
+	.masks			= intel_i810_masks,
+	.agp_enable		= intel_i810_agp_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= agp_generic_create_gatt_table,
+	.free_gatt_table	= agp_generic_free_gatt_table,
+	.insert_memory		= intel_i810_insert_entries,
+	.remove_memory		= intel_i810_remove_entries,
+	.alloc_by_type		= intel_i810_alloc_by_type,
+	.free_by_type		= intel_i810_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages        = agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages      = agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_830_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_i830_sizes,
+	.size_type		= FIXED_APER_SIZE,
+	.num_aperture_sizes	= 4,
+	.needs_scratch_page	= true,
+	.configure		= intel_i830_configure,
+	.fetch_size		= intel_i830_fetch_size,
+	.cleanup		= intel_i830_cleanup,
+	.mask_memory		= intel_i810_mask_memory,
+	.masks			= intel_i810_masks,
+	.agp_enable		= intel_i810_agp_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= intel_i830_create_gatt_table,
+	.free_gatt_table	= intel_i830_free_gatt_table,
+	.insert_memory		= intel_i830_insert_entries,
+	.remove_memory		= intel_i830_remove_entries,
+	.alloc_by_type		= intel_i830_alloc_by_type,
+	.free_by_type		= intel_i810_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages        = agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages      = agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
+	.chipset_flush		= intel_i830_chipset_flush,
+};
+
+static const struct agp_bridge_driver intel_915_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_i830_sizes,
+	.size_type		= FIXED_APER_SIZE,
+	.num_aperture_sizes	= 4,
+	.needs_scratch_page	= true,
+	.configure		= intel_i915_configure,
+	.fetch_size		= intel_i9xx_fetch_size,
+	.cleanup		= intel_i915_cleanup,
+	.mask_memory		= intel_i810_mask_memory,
+	.masks			= intel_i810_masks,
+	.agp_enable		= intel_i810_agp_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= intel_i915_create_gatt_table,
+	.free_gatt_table	= intel_i830_free_gatt_table,
+	.insert_memory		= intel_i915_insert_entries,
+	.remove_memory		= intel_i915_remove_entries,
+	.alloc_by_type		= intel_i830_alloc_by_type,
+	.free_by_type		= intel_i810_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages        = agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages      = agp_generic_destroy_pages,
+	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
+	.chipset_flush		= intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+	.agp_map_page		= intel_agp_map_page,
+	.agp_unmap_page		= intel_agp_unmap_page,
+	.agp_map_memory		= intel_agp_map_memory,
+	.agp_unmap_memory	= intel_agp_unmap_memory,
+#endif
+};
+
+static const struct agp_bridge_driver intel_i965_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_i830_sizes,
+	.size_type		= FIXED_APER_SIZE,
+	.num_aperture_sizes	= 4,
+	.needs_scratch_page	= true,
+	.configure		= intel_i915_configure,
+	.fetch_size		= intel_i9xx_fetch_size,
+	.cleanup		= intel_i915_cleanup,
+	.mask_memory		= intel_i965_mask_memory,
+	.masks			= intel_i810_masks,
+	.agp_enable		= intel_i810_agp_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= intel_i965_create_gatt_table,
+	.free_gatt_table	= intel_i830_free_gatt_table,
+	.insert_memory		= intel_i915_insert_entries,
+	.remove_memory		= intel_i915_remove_entries,
+	.alloc_by_type		= intel_i830_alloc_by_type,
+	.free_by_type		= intel_i810_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages        = agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages      = agp_generic_destroy_pages,
+	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
+	.chipset_flush		= intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+	.agp_map_page		= intel_agp_map_page,
+	.agp_unmap_page		= intel_agp_unmap_page,
+	.agp_map_memory		= intel_agp_map_memory,
+	.agp_unmap_memory	= intel_agp_unmap_memory,
+#endif
+};
+
+static const struct agp_bridge_driver intel_g33_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_i830_sizes,
+	.size_type		= FIXED_APER_SIZE,
+	.num_aperture_sizes	= 4,
+	.needs_scratch_page	= true,
+	.configure		= intel_i915_configure,
+	.fetch_size		= intel_i9xx_fetch_size,
+	.cleanup		= intel_i915_cleanup,
+	.mask_memory		= intel_i965_mask_memory,
+	.masks			= intel_i810_masks,
+	.agp_enable		= intel_i810_agp_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= intel_i915_create_gatt_table,
+	.free_gatt_table	= intel_i830_free_gatt_table,
+	.insert_memory		= intel_i915_insert_entries,
+	.remove_memory		= intel_i915_remove_entries,
+	.alloc_by_type		= intel_i830_alloc_by_type,
+	.free_by_type		= intel_i810_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_alloc_pages        = agp_generic_alloc_pages,
+	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_destroy_pages      = agp_generic_destroy_pages,
+	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
+	.chipset_flush		= intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+	.agp_map_page		= intel_agp_map_page,
+	.agp_unmap_page		= intel_agp_unmap_page,
+	.agp_map_memory		= intel_agp_map_memory,
+	.agp_unmap_memory	= intel_agp_unmap_memory,
+#endif
+};
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 10f24e3..b9734a9 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -310,6 +310,7 @@
 	.aperture_sizes		= nvidia_generic_sizes,
 	.size_type		= U8_APER_SIZE,
 	.num_aperture_sizes	= 5,
+	.needs_scratch_page	= true,
 	.configure		= nvidia_configure,
 	.fetch_size		= nvidia_fetch_size,
 	.cleanup		= nvidia_cleanup,
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 6c3837a..29aacd8 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -125,6 +125,7 @@
 	.aperture_sizes		= sis_generic_sizes,
 	.size_type		= U8_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= sis_configure,
 	.fetch_size		= sis_fetch_size,
 	.cleanup		= sis_cleanup,
@@ -415,14 +416,6 @@
 		.subvendor	= PCI_ANY_ID,
 		.subdevice	= PCI_ANY_ID,
 	},
-	{
-		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
-		.class_mask	= ~0,
-		.vendor		= PCI_VENDOR_ID_SI,
-		.device		= PCI_DEVICE_ID_SI_760,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-	},
 	{ }
 };
 
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 6f48931..95db713 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -28,6 +28,7 @@
  */
 static int uninorth_rev;
 static int is_u3;
+static u32 scratch_value;
 
 #define DEFAULT_APERTURE_SIZE 256
 #define DEFAULT_APERTURE_STRING "256"
@@ -172,7 +173,7 @@
 
 	gp = (u32 *) &agp_bridge->gatt_table[pg_start];
 	for (i = 0; i < mem->page_count; ++i) {
-		if (gp[i]) {
+		if (gp[i] != scratch_value) {
 			dev_info(&agp_bridge->dev->dev,
 				 "uninorth_insert_memory: entry 0x%x occupied (%x)\n",
 				 i, gp[i]);
@@ -214,8 +215,9 @@
 		return 0;
 
 	gp = (u32 *) &agp_bridge->gatt_table[pg_start];
-	for (i = 0; i < mem->page_count; ++i)
-		gp[i] = 0;
+	for (i = 0; i < mem->page_count; ++i) {
+		gp[i] = scratch_value;
+	}
 	mb();
 	uninorth_tlbflush(mem);
 
@@ -421,8 +423,13 @@
 
 	bridge->gatt_bus_addr = virt_to_phys(table);
 
+	if (is_u3)
+		scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL;
+	else
+		scratch_value =	cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) |
+				0x1UL);
 	for (i = 0; i < num_entries; i++)
-		bridge->gatt_table[i] = 0;
+		bridge->gatt_table[i] = scratch_value;
 
 	return 0;
 
@@ -519,6 +526,7 @@
 	.agp_destroy_pages	= agp_generic_destroy_pages,
 	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 	.cant_use_aperture	= true,
+	.needs_scratch_page	= true,
 };
 
 const struct agp_bridge_driver u3_agp_driver = {
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index d3bd243..df67e80 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -175,6 +175,7 @@
 	.aperture_sizes		= agp3_generic_sizes,
 	.size_type		= U8_APER_SIZE,
 	.num_aperture_sizes	= 10,
+	.needs_scratch_page	= true,
 	.configure		= via_configure_agp3,
 	.fetch_size		= via_fetch_size_agp3,
 	.cleanup		= via_cleanup_agp3,
@@ -201,6 +202,7 @@
 	.aperture_sizes		= via_generic_sizes,
 	.size_type		= U8_APER_SIZE,
 	.num_aperture_sizes	= 9,
+	.needs_scratch_page	= true,
 	.configure		= via_configure,
 	.fetch_size		= via_fetch_size,
 	.cleanup		= via_cleanup,
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index c1ab303..98310e1 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -1573,11 +1573,16 @@
 	dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device);
 
 	/* allot the first empty slot in the array */
-	for (index = 0; index < BOARD_COUNT; index++)
+	for (index = 0; index < BOARD_COUNT; index++) {
 		if (isi_card[index].base == 0) {
 			board = &isi_card[index];
 			break;
 		}
+	}
+	if (index == BOARD_COUNT) {
+		retval = -ENODEV;
+		goto err_disable;
+	}
 
 	board->index = index;
 	board->base = pci_resource_start(pdev, 3);
@@ -1624,6 +1629,7 @@
 errdec:
 	board->base = 0;
 	card_count--;
+err_disable:
 	pci_disable_device(pdev);
 err:
 	return retval;
diff --git a/drivers/char/n_gsm.c b/drivers/char/n_gsm.c
new file mode 100644
index 0000000..c4161d5
--- /dev/null
+++ b/drivers/char/n_gsm.c
@@ -0,0 +1,2763 @@
+/*
+ * n_gsm.c GSM 0710 tty multiplexor
+ * Copyright (c) 2009/10 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *	* THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
+ *
+ * TO DO:
+ *	Mostly done:	ioctls for setting modes/timing
+ *	Partly done: 	hooks so you can pull off frames to non tty devs
+ *	Restart DLCI 0 when it closes ?
+ *	Test basic encoding
+ *	Improve the tx engine
+ *	Resolve tx side locking by adding a queue_head and routing
+ *		all control traffic via it
+ *	General tidy/document
+ *	Review the locking/move to refcounts more (mux now moved to an
+ *		alloc/free model ready)
+ *	Use newest tty open/close port helpers and install hooks
+ *	What to do about power functions ?
+ *	Termios setting and negotiation
+ *	Do we need a 'which mux are you' ioctl to correlate mux and tty sets
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/bitops.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/tty_flip.h>
+#include <linux/tty_driver.h>
+#include <linux/serial.h>
+#include <linux/kfifo.h>
+#include <linux/skbuff.h>
+#include <linux/gsmmux.h>
+
+static int debug;
+module_param(debug, int, 0600);
+
+#define T1	(HZ/10)
+#define T2	(HZ/3)
+#define N2	3
+
+/* Use long timers for testing at low speed with debug on */
+#ifdef DEBUG_TIMING
+#define T1	HZ
+#define T2	(2 * HZ)
+#endif
+
+/* Semi-arbitrary buffer size limits. 0710 is normally run with 32-64 byte
+   limits so this is plenty */
+#define MAX_MRU 512
+#define MAX_MTU 512
+
+/*
+ *	Each block of data we have queued to go out is in the form of
+ *	a gsm_msg which holds everything we need in a link layer independent
+ *	format
+ */
+
+struct gsm_msg {
+	struct gsm_msg *next;
+	u8 addr;		/* DLCI address + flags */
+	u8 ctrl;		/* Control byte + flags */
+	unsigned int len;	/* Length of data block (can be zero) */
+	unsigned char *data;	/* Points into buffer but not at the start */
+	unsigned char buffer[0];
+};
+
+/*
+ *	Each active data link has a gsm_dlci structure associated which ties
+ *	the link layer to an optional tty (if the tty side is open). To avoid
+ *	complexity right now these are only ever freed up when the mux is
+ *	shut down.
+ *
+ *	At the moment we don't free DLCI objects until the mux is torn down;
+ *	this avoids object lifetime issues but might be worth reviewing later.
+ */
+
+struct gsm_dlci {
+	struct gsm_mux *gsm;
+	int addr;
+	int state;
+#define DLCI_CLOSED		0
+#define DLCI_OPENING		1	/* Sending SABM not seen UA */
+#define DLCI_OPEN		2	/* SABM/UA complete */
+#define DLCI_CLOSING		3	/* Sending DISC not seen UA/DM */
+
+	/* Link layer */
+	spinlock_t lock;	/* Protects the internal state */
+	struct timer_list t1;	/* Retransmit timer for SABM and UA */
+	int retries;
+	/* Uplink tty if active */
+	struct tty_port port;	/* The tty bound to this DLCI if there is one */
+	struct kfifo *fifo;	/* Queue fifo for the DLCI */
+	struct kfifo _fifo;	/* For new fifo API porting only */
+	int adaption;		/* Adaption layer in use */
+	u32 modem_rx;		/* Our incoming virtual modem lines */
+	u32 modem_tx;		/* Our outgoing modem lines */
+	int dead;		/* Refuse re-open */
+	/* Flow control */
+	int throttled;		/* Private copy of throttle state */
+	int constipated;	/* Throttle status for outgoing */
+	/* Packetised I/O */
+	struct sk_buff *skb;	/* Frame being sent */
+	struct sk_buff_head skb_list;	/* Queued frames */
+	/* Data handling callback */
+	void (*data)(struct gsm_dlci *dlci, u8 *data, int len);
+};
+
+/* DLCI 0, 62/63 are special or reserved; see gsmtty_open */
+
+#define NUM_DLCI		64
+
+/*
+ *	DLCI 0 is used to pass control blocks out of band of the data
+ *	flow (and with a higher link priority). One command can be outstanding
+ *	at a time and we use this structure to manage them. They are created
+ *	and destroyed by the user context, and updated by the receive paths
+ *	and timers
+ */
+
+struct gsm_control {
+	u8 cmd;		/* Command we are issuing */
+	u8 *data;	/* Data for the command in case we retransmit */
+	int len;	/* Length of block for retransmission */
+	int done;	/* Done flag */
+	int error;	/* Error if any */
+};
+
+/*
+ *	Each GSM mux we have is represented by this structure. If we are
+ *	operating as an ldisc then we use this structure as our ldisc
+ *	state. We need to sort out lifetimes and locking with respect
+ *	to the gsm mux array. For now we don't free DLCI objects that
+ *	have been instantiated until the mux itself is terminated.
+ *
+ *	To consider further: tty open versus mux shutdown.
+ */
+
+struct gsm_mux {
+	struct tty_struct *tty;		/* The tty our ldisc is bound to */
+	spinlock_t lock;
+
+	/* Events on the GSM channel */
+	wait_queue_head_t event;
+
+	/* Bits for GSM mode decoding */
+
+	/* Framing Layer */
+	unsigned char *buf;
+	int state;
+#define GSM_SEARCH		0
+#define GSM_START		1
+#define GSM_ADDRESS		2
+#define GSM_CONTROL		3
+#define GSM_LEN			4
+#define GSM_DATA		5
+#define GSM_FCS			6
+#define GSM_OVERRUN		7
+	unsigned int len;
+	unsigned int address;
+	unsigned int count;
+	int escape;
+	int encoding;
+	u8 control;
+	u8 fcs;
+	u8 *txframe;			/* TX framing buffer */
+
+	/* Methods for the receiver side */
+	void (*receive)(struct gsm_mux *gsm, u8 ch);
+	void (*error)(struct gsm_mux *gsm, u8 ch, u8 flag);
+	/* And transmit side */
+	int (*output)(struct gsm_mux *mux, u8 *data, int len);
+
+	/* Link Layer */
+	unsigned int mru;
+	unsigned int mtu;
+	int initiator;			/* Did we initiate connection */
+	int dead;			/* Has the mux been shut down */
+	struct gsm_dlci *dlci[NUM_DLCI];
+	int constipated;		/* Asked by remote to shut up */
+
+	spinlock_t tx_lock;
+	unsigned int tx_bytes;		/* TX data outstanding */
+#define TX_THRESH_HI		8192
+#define TX_THRESH_LO		2048
+	struct gsm_msg *tx_head;	/* Pending data packets */
+	struct gsm_msg *tx_tail;
+
+	/* Control messages */
+	struct timer_list t2_timer;	/* Retransmit timer for commands */
+	int cretries;			/* Command retry counter */
+	struct gsm_control *pending_cmd;/* Our current pending command */
+	spinlock_t control_lock;	/* Protects the pending command */
+
+	/* Configuration */
+	int adaption;		/* 1 or 2 supported */
+	u8 ftype;		/* UI or UIH */
+	int t1, t2;		/* Timers in 1/100th of a sec */
+	int n2;			/* Retry count */
+
+	/* Statistics (not currently exposed) */
+	unsigned long bad_fcs;
+	unsigned long malformed;
+	unsigned long io_error;
+	unsigned long bad_size;
+	unsigned long unsupported;
+};
+
+
+/*
+ *	Mux objects - needed so that we can translate a tty index into the
+ *	relevant mux and DLCI.
+ */
+
+#define MAX_MUX		4			/* 256 minors */
+static struct gsm_mux *gsm_mux[MAX_MUX];	/* GSM muxes */
+static spinlock_t gsm_mux_lock;
+
+/*
+ *	This section of the driver logic implements the GSM encodings
+ *	both the basic and the 'advanced'. Reliable transport is not
+ *	supported.
+ */
+
+#define CR			0x02
+#define EA			0x01
+#define	PF			0x10
+
+/* I is special: the rest are ..*/
+#define RR			0x01
+#define UI			0x03
+#define RNR			0x05
+#define REJ			0x09
+#define DM			0x0F
+#define SABM			0x2F
+#define DISC			0x43
+#define UA			0x63
+#define	UIH			0xEF
+
+/* Channel commands */
+#define CMD_NSC			0x09
+#define CMD_TEST		0x11
+#define CMD_PSC			0x21
+#define CMD_RLS			0x29
+#define CMD_FCOFF		0x31
+#define CMD_PN			0x41
+#define CMD_RPN			0x49
+#define CMD_FCON		0x51
+#define CMD_CLD			0x61
+#define CMD_SNC			0x69
+#define CMD_MSC			0x71
+
+/* Virtual modem bits */
+#define MDM_FC			0x01
+#define MDM_RTC			0x02
+#define MDM_RTR			0x04
+#define MDM_IC			0x20
+#define MDM_DV			0x40
+
+#define GSM0_SOF		0xF9
+#define GSM1_SOF 		0x7E
+#define GSM1_ESCAPE		0x7D
+#define GSM1_ESCAPE_BITS	0x20
+#define XON			0x11
+#define XOFF			0x13
+
+static const struct tty_port_operations gsm_port_ops;
+
+/*
+ *	CRC table for GSM 0710
+ */
+
+static const u8 gsm_fcs8[256] = {
+	0x00, 0x91, 0xE3, 0x72, 0x07, 0x96, 0xE4, 0x75,
+	0x0E, 0x9F, 0xED, 0x7C, 0x09, 0x98, 0xEA, 0x7B,
+	0x1C, 0x8D, 0xFF, 0x6E, 0x1B, 0x8A, 0xF8, 0x69,
+	0x12, 0x83, 0xF1, 0x60, 0x15, 0x84, 0xF6, 0x67,
+	0x38, 0xA9, 0xDB, 0x4A, 0x3F, 0xAE, 0xDC, 0x4D,
+	0x36, 0xA7, 0xD5, 0x44, 0x31, 0xA0, 0xD2, 0x43,
+	0x24, 0xB5, 0xC7, 0x56, 0x23, 0xB2, 0xC0, 0x51,
+	0x2A, 0xBB, 0xC9, 0x58, 0x2D, 0xBC, 0xCE, 0x5F,
+	0x70, 0xE1, 0x93, 0x02, 0x77, 0xE6, 0x94, 0x05,
+	0x7E, 0xEF, 0x9D, 0x0C, 0x79, 0xE8, 0x9A, 0x0B,
+	0x6C, 0xFD, 0x8F, 0x1E, 0x6B, 0xFA, 0x88, 0x19,
+	0x62, 0xF3, 0x81, 0x10, 0x65, 0xF4, 0x86, 0x17,
+	0x48, 0xD9, 0xAB, 0x3A, 0x4F, 0xDE, 0xAC, 0x3D,
+	0x46, 0xD7, 0xA5, 0x34, 0x41, 0xD0, 0xA2, 0x33,
+	0x54, 0xC5, 0xB7, 0x26, 0x53, 0xC2, 0xB0, 0x21,
+	0x5A, 0xCB, 0xB9, 0x28, 0x5D, 0xCC, 0xBE, 0x2F,
+	0xE0, 0x71, 0x03, 0x92, 0xE7, 0x76, 0x04, 0x95,
+	0xEE, 0x7F, 0x0D, 0x9C, 0xE9, 0x78, 0x0A, 0x9B,
+	0xFC, 0x6D, 0x1F, 0x8E, 0xFB, 0x6A, 0x18, 0x89,
+	0xF2, 0x63, 0x11, 0x80, 0xF5, 0x64, 0x16, 0x87,
+	0xD8, 0x49, 0x3B, 0xAA, 0xDF, 0x4E, 0x3C, 0xAD,
+	0xD6, 0x47, 0x35, 0xA4, 0xD1, 0x40, 0x32, 0xA3,
+	0xC4, 0x55, 0x27, 0xB6, 0xC3, 0x52, 0x20, 0xB1,
+	0xCA, 0x5B, 0x29, 0xB8, 0xCD, 0x5C, 0x2E, 0xBF,
+	0x90, 0x01, 0x73, 0xE2, 0x97, 0x06, 0x74, 0xE5,
+	0x9E, 0x0F, 0x7D, 0xEC, 0x99, 0x08, 0x7A, 0xEB,
+	0x8C, 0x1D, 0x6F, 0xFE, 0x8B, 0x1A, 0x68, 0xF9,
+	0x82, 0x13, 0x61, 0xF0, 0x85, 0x14, 0x66, 0xF7,
+	0xA8, 0x39, 0x4B, 0xDA, 0xAF, 0x3E, 0x4C, 0xDD,
+	0xA6, 0x37, 0x45, 0xD4, 0xA1, 0x30, 0x42, 0xD3,
+	0xB4, 0x25, 0x57, 0xC6, 0xB3, 0x22, 0x50, 0xC1,
+	0xBA, 0x2B, 0x59, 0xC8, 0xBD, 0x2C, 0x5E, 0xCF
+};
+
+#define INIT_FCS	0xFF
+#define GOOD_FCS	0xCF
+
+/**
+ *	gsm_fcs_add	-	update FCS
+ *	@fcs: Current FCS
+ *	@c: Next data
+ *
+ *	Update the FCS to include c. Uses the algorithm in the specification
+ *	notes.
+ */
+
+static inline u8 gsm_fcs_add(u8 fcs, u8 c)
+{
+	return gsm_fcs8[fcs ^ c];
+}
+
+/**
+ *	gsm_fcs_add_block	-	update FCS for a block
+ *	@fcs: Current FCS
+ *	@c: buffer of data
+ *	@len: length of buffer
+ *
+ *	Update the FCS to include c. Uses the algorithm in the specification
+ *	notes.
+ */
+
+static inline u8 gsm_fcs_add_block(u8 fcs, u8 *c, int len)
+{
+	while (len--)
+		fcs = gsm_fcs8[fcs ^ *c++];
+	return fcs;
+}
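+
+/* The running FCS starts at INIT_FCS (0xFF). The transmit path stores
+ * 0xFF minus the accumulated value in the frame, and a receiver that
+ * also folds the received FCS byte into its running value should be
+ * left with GOOD_FCS (0xCF) for an undamaged frame.
+ */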
+
+/**
+ *	gsm_read_ea		-	read a byte into an EA
+ *	@val: variable holding value
+ *	@c: byte going into the EA
+ *
+ *	Processes one byte of an EA. Updates the passed variable
+ *	and returns 1 if the EA is now completely read
+ */
+
+static int gsm_read_ea(unsigned int *val, u8 c)
+{
+	/* Add the next 7 bits into the value */
+	*val <<= 7;
+	*val |= c >> 1;
+	/* Was this the last byte of the EA? 1 = yes */
+	return c & EA;
+}
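+
+/* For example, with *val starting at zero, the single byte 0x07 leaves
+ * *val at 0x03 and returns 1 (field complete); the two bytes 0x04 then
+ * 0x03 leave *val at 0x101, with only the second call returning 1.
+ */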
+
+/**
+ *	gsm_encode_modem	-	encode modem data bits
+ *	@dlci: DLCI to encode from
+ *
+ *	Returns the correct GSM encoded modem status bits (6 bit field) for
+ *	the current status of the DLCI and attached tty object
+ */
+
+static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
+{
+	u8 modembits = 0;
+	/* FC is true flow control not modem bits */
+	if (dlci->throttled)
+		modembits |= MDM_FC;
+	if (dlci->modem_tx & TIOCM_DTR)
+		modembits |= MDM_RTC;
+	if (dlci->modem_tx & TIOCM_RTS)
+		modembits |= MDM_RTR;
+	if (dlci->modem_tx & TIOCM_RI)
+		modembits |= MDM_IC;
+	if (dlci->modem_tx & TIOCM_CD)
+		modembits |= MDM_DV;
+	return modembits;
+}
+
+/**
+ *	gsm_print_packet	-	display a frame for debug
+ *	@hdr: header to print before decode
+ *	@addr: address EA from the frame
+ *	@cr: C/R bit from the frame
+ *	@control: control including PF bit
+ *	@data: following data bytes
+ *	@dlen: length of data
+ *
+ *	Displays a packet in human readable format for debugging purposes. The
+ *	style is based on amateur radio LAP-B dump display.
+ */
+
+static void gsm_print_packet(const char *hdr, int addr, int cr,
+					u8 control, const u8 *data, int dlen)
+{
+	if (!(debug & 1))
+		return;
+
+	printk(KERN_INFO "%s %d) %c: ", hdr, addr, "RC"[cr]);
+
+	switch (control & ~PF) {
+	case SABM:
+		printk(KERN_CONT "SABM");
+		break;
+	case UA:
+		printk(KERN_CONT "UA");
+		break;
+	case DISC:
+		printk(KERN_CONT "DISC");
+		break;
+	case DM:
+		printk(KERN_CONT "DM");
+		break;
+	case UI:
+		printk(KERN_CONT "UI");
+		break;
+	case UIH:
+		printk(KERN_CONT "UIH");
+		break;
+	default:
+		if (!(control & 0x01)) {
+			printk(KERN_CONT "I N(S)%d N(R)%d",
+				(control & 0x0E) >> 1, (control & 0xE)>> 5);
+		} else switch (control & 0x0F) {
+		case RR:
+			printk("RR(%d)", (control & 0xE0) >> 5);
+			break;
+		case RNR:
+			printk("RNR(%d)", (control & 0xE0) >> 5);
+			break;
+		case REJ:
+			printk("REJ(%d)", (control & 0xE0) >> 5);
+			break;
+		default:
+			printk(KERN_CONT "[%02X]", control);
+		}
+	}
+
+	if (control & PF)
+		printk(KERN_CONT "(P)");
+	else
+		printk(KERN_CONT "(F)");
+
+	if (dlen) {
+		int ct = 0;
+		while (dlen--) {
+			if (ct % 8 == 0)
+				printk(KERN_CONT "\n    ");
+			printk(KERN_CONT "%02X ", *data++);
+			ct++;
+		}
+	}
+	printk(KERN_CONT "\n");
+}
+
+
+/*
+ *	Link level transmission side
+ */
+
+/**
+ *	gsm_stuff_frame	-	bytestuff a packet
+ *	@ibuf: input
+ *	@obuf: output
+ *	@len: length of input
+ *
+ *	Expand a buffer by bytestuffing it. The worst case size change
+ *	is doubling and the caller is responsible for handing out
+ *	suitably sized buffers.
+ */
+
+static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
+{
+	int olen = 0;
+	while (len--) {
+		if (*input == GSM1_SOF || *input == GSM1_ESCAPE
+		    || *input == XON || *input == XOFF) {
+			*output++ = GSM1_ESCAPE;
+			*output++ = *input++ ^ GSM1_ESCAPE_BITS;
+			olen++;
+		} else
+			*output++ = *input++;
+		olen++;
+	}
+	return olen;
+}
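+
+/* For example, the input bytes 0x7E 0x41 are emitted as 0x7D 0x5E 0x41:
+ * the SOF value is replaced by GSM1_ESCAPE followed by the original
+ * byte XORed with GSM1_ESCAPE_BITS, and the function returns 3.
+ */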
+
+static void hex_packet(const unsigned char *p, int len)
+{
+	int i;
+	for (i = 0; i < len; i++) {
+		if (i && (i % 16) == 0)
+			printk("\n");
+		printk("%02X ", *p++);
+	}
+	printk("\n");
+}
+
+/**
+ *	gsm_send	-	send a control frame
+ *	@gsm: our GSM mux
+ *	@addr: address for control frame
+ *	@cr: command/response bit
+ *	@control:  control byte including PF bit
+ *
+ *	Format up and transmit a control frame. These do not go via the
+ *	queueing logic as they should be transmitted ahead of data when
+ *	they are needed.
+ *
+ *	FIXME: Lock versus data TX path
+ */
+
+static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
+{
+	int len;
+	u8 cbuf[10];
+	u8 ibuf[3];
+
+	switch (gsm->encoding) {
+	case 0:
+		cbuf[0] = GSM0_SOF;
+		cbuf[1] = (addr << 2) | (cr << 1) | EA;
+		cbuf[2] = control;
+		cbuf[3] = EA;	/* Length of data = 0 */
+		cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3);
+		cbuf[5] = GSM0_SOF;
+		len = 6;
+		break;
+	case 1:
+	case 2:
+		/* Control frame + packing (but not frame stuffing) in mode 1 */
+		ibuf[0] = (addr << 2) | (cr << 1) | EA;
+		ibuf[1] = control;
+		ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2);
+		/* Stuffing may double the size worst case */
+		len = gsm_stuff_frame(ibuf, cbuf + 1, 3);
+		/* Now add the SOF markers */
+		cbuf[0] = GSM1_SOF;
+		cbuf[len + 1] = GSM1_SOF;
+		/* FIXME: we can omit the lead one in many cases */
+		len += 2;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+	gsm->output(gsm, cbuf, len);
+	gsm_print_packet("-->", addr, cr, control, NULL, 0);
+}
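+
+/* As a concrete basic-mode example, gsm_send(gsm, 0, 1, SABM|PF) with
+ * encoding 0 emits the six bytes F9 03 3F 01 1C F9: SOF, address
+ * (DLCI 0, C/R set, EA), control, a zero data length, the complemented
+ * FCS over the three preceding bytes, and the closing SOF.
+ */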
+
+/**
+ *	gsm_response	-	send a control response
+ *	@gsm: our GSM mux
+ *	@addr: address for control frame
+ *	@control:  control byte including PF bit
+ *
+ *	Format up and transmit a link level response frame.
+ */
+
+static inline void gsm_response(struct gsm_mux *gsm, int addr, int control)
+{
+	gsm_send(gsm, addr, 0, control);
+}
+
+/**
+ *	gsm_command	-	send a control command
+ *	@gsm: our GSM mux
+ *	@addr: address for control frame
+ *	@control:  control byte including PF bit
+ *
+ *	Format up and transmit a link level command frame.
+ */
+
+static inline void gsm_command(struct gsm_mux *gsm, int addr, int control)
+{
+	gsm_send(gsm, addr, 1, control);
+}
+
+/* Data transmission */
+
+#define HDR_LEN		6	/* ADDR CTRL [LEN.2] DATA FCS */
+
+/**
+ *	gsm_data_alloc		-	allocate data frame
+ *	@gsm: GSM mux
+ *	@addr: DLCI address
+ *	@len: length excluding header and FCS
+ *	@ctrl: control byte
+ *
+ *	Allocate a new data buffer for sending frames with data. Space is left
+ *	at the front for header bytes but that is treated as an implementation
+ *	detail and not for the high level code to use
+ */
+
+static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
+								u8 ctrl)
+{
+	struct gsm_msg *m = kmalloc(sizeof(struct gsm_msg) + len + HDR_LEN,
+								GFP_ATOMIC);
+	if (m == NULL)
+		return NULL;
+	m->data = m->buffer + HDR_LEN - 1;	/* Allow for FCS */
+	m->len = len;
+	m->addr = addr;
+	m->ctrl = ctrl;
+	m->next = NULL;
+	return m;
+}
+
+/**
+ *	gsm_data_kick		-	poke the queue
+ *	@gsm: GSM Mux
+ *
+ *	The tty device has called us to indicate that room has appeared in
+ *	the transmit queue. Ram more data into the pipe if we have any
+ *
+ *	FIXME: lock against link layer control transmissions
+ */
+
+static void gsm_data_kick(struct gsm_mux *gsm)
+{
+	struct gsm_msg *msg = gsm->tx_head;
+	int len;
+	int skip_sof = 0;
+
+	/* FIXME: We need to apply this solely to data messages */
+	if (gsm->constipated)
+		return;
+
+	while (gsm->tx_head != NULL) {
+		msg = gsm->tx_head;
+		if (gsm->encoding != 0) {
+			gsm->txframe[0] = GSM1_SOF;
+			len = gsm_stuff_frame(msg->data,
+						gsm->txframe + 1, msg->len);
+			gsm->txframe[len + 1] = GSM1_SOF;
+			len += 2;
+		} else {
+			gsm->txframe[0] = GSM0_SOF;
+			memcpy(gsm->txframe + 1 , msg->data, msg->len);
+			gsm->txframe[msg->len + 1] = GSM0_SOF;
+			len = msg->len + 2;
+		}
+
+		if (debug & 4) {
+			printk("gsm_data_kick: \n");
+			hex_packet(gsm->txframe, len);
+		}
+
+		if (gsm->output(gsm, gsm->txframe + skip_sof,
+						len - skip_sof) < 0)
+			break;
+		/* FIXME: Can eliminate one SOF in many more cases */
+		gsm->tx_head = msg->next;
+		if (gsm->tx_head == NULL)
+			gsm->tx_tail = NULL;
+		gsm->tx_bytes -= msg->len;
+		kfree(msg);
+		/* For a burst of frames skip the extra SOF within the
+		   burst */
+		skip_sof = 1;
+	}
+}
+
+/**
+ *	__gsm_data_queue		-	queue a UI or UIH frame
+ *	@dlci: DLCI sending the data
+ *	@msg: message queued
+ *
+ *	Add data to the transmit queue and try and get stuff moving
+ *	out of the mux tty if not already doing so. The caller must hold
+ *	the gsm tx lock.
+ */
+
+static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
+{
+	struct gsm_mux *gsm = dlci->gsm;
+	u8 *dp = msg->data;
+	u8 *fcs = dp + msg->len;
+
+	/* Fill in the header */
+	if (gsm->encoding == 0) {
+		if (msg->len < 128)
+			*--dp = (msg->len << 1) | EA;
+		else {
+			*--dp = (msg->len >> 6) | EA;
+			*--dp = (msg->len & 127) << 1;
+		}
+	}
+
+	*--dp = msg->ctrl;
+	if (gsm->initiator)
+		*--dp = (msg->addr << 2) | 2 | EA;
+	else
+		*--dp = (msg->addr << 2) | EA;
+	*fcs = gsm_fcs_add_block(INIT_FCS, dp , msg->data - dp);
+	/* Ugly protocol layering violation */
+	if (msg->ctrl == UI || msg->ctrl == (UI|PF))
+		*fcs = gsm_fcs_add_block(*fcs, msg->data, msg->len);
+	*fcs = 0xFF - *fcs;
+
+	gsm_print_packet("Q> ", msg->addr, gsm->initiator, msg->ctrl,
+							msg->data, msg->len);
+
+	/* Move the header back and adjust the length, also allow for the FCS
+	   now tacked on the end */
+	msg->len += (msg->data - dp) + 1;
+	msg->data = dp;
+
+	/* Add to the actual output queue */
+	if (gsm->tx_tail)
+		gsm->tx_tail->next = msg;
+	else
+		gsm->tx_head = msg;
+	gsm->tx_tail = msg;
+	gsm->tx_bytes += msg->len;
+	gsm_data_kick(gsm);
+}
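+
+/* For a basic-mode (encoding 0) UIH frame on DLCI 5 carrying two data
+ * bytes, the header built backwards here reads, from dp onwards,
+ * 0x17 0xEF 0x05 when we are the initiator (0x15 otherwise): address
+ * with EA, control, then the length shifted left with EA set. The
+ * complemented FCS over those header bytes lands just after the data.
+ */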
+
+/**
+ *	gsm_data_queue		-	queue a UI or UIH frame
+ *	@dlci: DLCI sending the data
+ *	@msg: message queued
+ *
+ *	Add data to the transmit queue and try and get stuff moving
+ *	out of the mux tty if not already doing so. Takes the
+ *	gsm tx lock and dlci lock.
+ */
+
+static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+	__gsm_data_queue(dlci, msg);
+	spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+}
+
+/**
+ *	gsm_dlci_data_output	-	try and push data out of a DLCI
+ *	@gsm: mux
+ *	@dlci: the DLCI to pull data from
+ *
+ *	Pull data from a DLCI and send it into the transmit queue if there
+ *	is data. Keep to the MRU of the mux. This path handles the usual tty
+ *	interface which is a byte stream with optional modem data.
+ *
+ *	Caller must hold the tx_lock of the mux.
+ */
+
+static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
+{
+	struct gsm_msg *msg;
+	u8 *dp;
+	int len, size;
+	int h = dlci->adaption - 1;
+
+	len = kfifo_len(dlci->fifo);
+	if (len == 0)
+		return 0;
+
+	/* MTU/MRU count only the data bits */
+	if (len > gsm->mtu)
+		len = gsm->mtu;
+
+	size = len + h;
+
+	msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+	/* FIXME: need a timer or something to kick this so it can't
+	   get stuck with no work outstanding and no buffer free */
+	if (msg == NULL)
+		return -ENOMEM;
+	dp = msg->data;
+	switch (dlci->adaption) {
+	case 1:	/* Unstructured */
+		break;
+	case 2:	/* Unstructured with modem bits. Always one byte as we never
+		   send inline break data */
+		*dp += gsm_encode_modem(dlci);
+		len--;
+		break;
+	}
+	WARN_ON(kfifo_out_locked(dlci->fifo, dp , len, &dlci->lock) != len);
+	__gsm_data_queue(dlci, msg);
+	/* Bytes of data we used up */
+	return size;
+}
+
+/**
+ *	gsm_dlci_data_output_framed  -	try and push data out of a DLCI
+ *	@gsm: mux
+ *	@dlci: the DLCI to pull data from
+ *
+ *	Pull data from a DLCI and send it into the transmit queue if there
+ *	is data. Keep to the MRU of the mux. This path handles framed data
+ *	queued as skbuffs to the DLCI.
+ *
+ *	Caller must hold the tx_lock of the mux.
+ */
+
+static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+						struct gsm_dlci *dlci)
+{
+	struct gsm_msg *msg;
+	u8 *dp;
+	int len, size;
+	int last = 0, first = 0;
+	int overhead = 0;
+
+	/* One byte per frame is used for B/F flags */
+	if (dlci->adaption == 4)
+		overhead = 1;
+
+	/* dlci->skb is locked by tx_lock */
+	if (dlci->skb == NULL) {
+		dlci->skb = skb_dequeue(&dlci->skb_list);
+		if (dlci->skb == NULL)
+			return 0;
+		first = 1;
+	}
+	len = dlci->skb->len + overhead;
+
+	/* MTU/MRU count only the data bits */
+	if (len > gsm->mtu) {
+		if (dlci->adaption == 3) {
+			/* Over long frame, bin it */
+			kfree_skb(dlci->skb);
+			dlci->skb = NULL;
+			return 0;
+		}
+		len = gsm->mtu;
+	} else
+		last = 1;
+
+	size = len + overhead;
+	msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+
+	/* FIXME: need a timer or something to kick this so it can't
+	   get stuck with no work outstanding and no buffer free */
+	if (msg == NULL)
+		return -ENOMEM;
+	dp = msg->data;
+
+	if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */
+		/* Flag byte to carry the start/end info */
+		*dp++ = last << 7 | first << 6 | 1;	/* EA */
+		len--;
+	}
+	memcpy(dp, skb_pull(dlci->skb, len), len);
+	__gsm_data_queue(dlci, msg);
+	if (last)
+		dlci->skb = NULL;
+	return size;
+}
+
+/**
+ *	gsm_dlci_data_sweep		-	look for data to send
+ *	@gsm: the GSM mux
+ *
+ *	Sweep the GSM mux channels in priority order looking for ones with
+ *	data to send. We could do with optimising this scan a bit. We aim
+ *	to fill the queue totally or up to TX_THRESH_HI bytes. Once we hit
+ *	TX_THRESH_LO we get called again
+ *
+ *	FIXME: We should round robin between groups and in theory you can
+ *	renegotiate DLCI priorities with optional stuff. Needs optimising.
+ */
+
+static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
+{
+	int len;
+	/* Priority ordering: We should do priority with RR of the groups */
+	int i = 1;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gsm->tx_lock, flags);
+	while (i < NUM_DLCI) {
+		struct gsm_dlci *dlci;
+
+		if (gsm->tx_bytes > TX_THRESH_HI)
+			break;
+		dlci = gsm->dlci[i];
+		if (dlci == NULL || dlci->constipated) {
+			i++;
+			continue;
+		}
+		if (dlci->adaption < 3)
+			len = gsm_dlci_data_output(gsm, dlci);
+		else
+			len = gsm_dlci_data_output_framed(gsm, dlci);
+		if (len < 0)
+			return;
+		/* DLCI empty - try the next */
+		if (len == 0)
+			i++;
+	}
+	spin_unlock_irqrestore(&gsm->tx_lock, flags);
+}
+
+/**
+ *	gsm_dlci_data_kick	-	transmit if possible
+ *	@dlci: DLCI to kick
+ *
+ *	Transmit data from this DLCI if the queue is empty. We can't rely on
+ *	a tty wakeup except when we filled the pipe so we need to fire off
+ *	new data ourselves in other cases.
+ */
+
+static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+	/* If we have nothing running then we need to fire up */
+	if (dlci->gsm->tx_bytes == 0)
+		gsm_dlci_data_output(dlci->gsm, dlci);
+	else if (dlci->gsm->tx_bytes < TX_THRESH_LO)
+		gsm_dlci_data_sweep(dlci->gsm);
+	spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+}
+
+/*
+ *	Control message processing
+ */
+
+
+/**
+ *	gsm_control_reply	-	send a response frame to a control
+ *	@gsm: gsm channel
+ *	@cmd: the command to use
+ *	@data: data to follow encoded info
+ *	@dlen: length of data
+ *
+ *	Encode up and queue a UI/UIH frame containing our response.
+ */
+
+static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data,
+					int dlen)
+{
+	struct gsm_msg *msg;
+	msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype);
+	msg->data[0] = (cmd & 0xFE) << 1 | EA;	/* Clear C/R */
+	msg->data[1] = (dlen << 1) | EA;
+	memcpy(msg->data + 2, data, dlen);
+	gsm_data_queue(gsm->dlci[0], msg);
+}
+
+/**
+ *	gsm_process_modem	-	process received modem status
+ *	@tty: virtual tty bound to the DLCI
+ *	@dlci: DLCI to affect
+ *	@modem: modem bits (full EA)
+ *
+ *	Used when a modem control message, or a line state sent inline in
+ *	adaption layer 2, is processed. Sort out the local modem state and
+ *	throttles
+ */
+
+static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
+							u32 modem)
+{
+	int  mlines = 0;
+	u8 brk = modem >> 6;
+
+	/* Flow control/ready to communicate */
+	if (modem & MDM_FC) {
+		/* Need to throttle our output on this device */
+		dlci->constipated = 1;
+	}
+	if (modem & MDM_RTC) {
+		mlines |= TIOCM_DSR | TIOCM_DTR;
+		dlci->constipated = 0;
+		gsm_dlci_data_kick(dlci);
+	}
+	/* Map modem bits */
+	if (modem & MDM_RTR)
+		mlines |= TIOCM_RTS | TIOCM_CTS;
+	if (modem & MDM_IC)
+		mlines |= TIOCM_RI;
+	if (modem & MDM_DV)
+		mlines |= TIOCM_CD;
+
+	/* Carrier drop -> hangup */
+	if (tty) {
+		if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD))
+			if (!(tty->termios->c_cflag & CLOCAL))
+				tty_hangup(tty);
+		if (brk & 0x01)
+			tty_insert_flip_char(tty, 0, TTY_BREAK);
+	}
+	dlci->modem_rx = mlines;
+}
+
+/**
+ *	gsm_control_modem	-	modem status received
+ *	@gsm: GSM channel
+ *	@data: data following command
+ *	@clen: command length
+ *
+ *	We have received a modem status control message. This is used by
+ *	the GSM mux protocol to pass virtual modem line status and optionally
+ *	to indicate break signals. Unpack it, convert to Linux representation
+ *	and if need be stuff a break message down the tty.
+ */
+
+static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
+{
+	unsigned int addr = 0;
+	unsigned int modem = 0;
+	struct gsm_dlci *dlci;
+	int len = clen;
+	u8 *dp = data;
+	struct tty_struct *tty;
+
+	while (gsm_read_ea(&addr, *dp++) == 0) {
+		len--;
+		if (len == 0)
+			return;
+	}
+	/* Must be at least one byte following the EA */
+	len--;
+	if (len <= 0)
+		return;
+
+	addr >>= 1;
+	/* Closed port, or invalid ? */
+	if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
+		return;
+	dlci = gsm->dlci[addr];
+
+	while (gsm_read_ea(&modem, *dp++) == 0) {
+		len--;
+		if (len == 0)
+			return;
+	}
+	tty = tty_port_tty_get(&dlci->port);
+	gsm_process_modem(tty, dlci, modem);
+	if (tty) {
+		tty_wakeup(tty);
+		tty_kref_put(tty);
+	}
+	gsm_control_reply(gsm, CMD_MSC, data, clen);
+}
+
+/**
+ *	gsm_control_rls		-	remote line status
+ *	@gsm: GSM channel
+ *	@data: data bytes
+ *	@clen: data length
+ *
+ *	The modem sends us a two byte message on the control channel whenever
+ *	it wishes to send us an error state from the virtual link. Stuff
+ *	this into the uplink tty if present
+ */
+
+static void gsm_control_rls(struct gsm_mux *gsm, u8 *data, int clen)
+{
+	struct tty_struct *tty;
+	unsigned int addr = 0 ;
+	u8 bits;
+	int len = clen;
+	u8 *dp = data;
+
+	while (gsm_read_ea(&addr, *dp++) == 0) {
+		len--;
+		if (len == 0)
+			return;
+	}
+	/* Must be at least one byte following the EA */
+	len--;
+	if (len <= 0)
+		return;
+	addr >>= 1;
+	/* Closed port, or invalid ? */
+	if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
+		return;
+	/* No error ? */
+	bits = *dp;
+	if ((bits & 1) == 0)
+		return;
+	/* See if we have an uplink tty */
+	tty = tty_port_tty_get(&gsm->dlci[addr]->port);
+
+	if (tty) {
+		if (bits & 2)
+			tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+		if (bits & 4)
+			tty_insert_flip_char(tty, 0, TTY_PARITY);
+		if (bits & 8)
+			tty_insert_flip_char(tty, 0, TTY_FRAME);
+		tty_flip_buffer_push(tty);
+		tty_kref_put(tty);
+	}
+	gsm_control_reply(gsm, CMD_RLS, data, clen);
+}
+
+static void gsm_dlci_begin_close(struct gsm_dlci *dlci);
+
+/**
+ *	gsm_control_message	-	DLCI 0 control processing
+ *	@gsm: our GSM mux
+ *	@command:  the command EA
+ *	@data: data beyond the command/length EAs
+ *	@clen: length
+ *
+ *	Input processor for control messages from the other end of the link.
+ *	Processes the incoming request and queues a response frame or an
+ *	NSC response if not supported
+ */
+
+static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
+							u8 *data, int clen)
+{
+	u8 buf[1];
+	switch (command) {
+	case CMD_CLD: {
+		struct gsm_dlci *dlci = gsm->dlci[0];
+		/* Modem wishes to close down */
+		if (dlci) {
+			dlci->dead = 1;
+			gsm->dead = 1;
+			gsm_dlci_begin_close(dlci);
+		}
+		}
+		break;
+	case CMD_TEST:
+		/* Modem wishes to test, reply with the data */
+		gsm_control_reply(gsm, CMD_TEST, data, clen);
+		break;
+	case CMD_FCON:
+		/* Modem wants us to STFU */
+		gsm->constipated = 1;
+		gsm_control_reply(gsm, CMD_FCON, NULL, 0);
+		break;
+	case CMD_FCOFF:
+		/* Modem can accept data again */
+		gsm->constipated = 0;
+		gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
+		/* Kick the link in case it is idling */
+		gsm_data_kick(gsm);
+		break;
+	case CMD_MSC:
+		/* Out of band modem line change indicator for a DLCI */
+		gsm_control_modem(gsm, data, clen);
+		break;
+	case CMD_RLS:
+		/* Out of band error reception for a DLCI */
+		gsm_control_rls(gsm, data, clen);
+		break;
+	case CMD_PSC:
+		/* Modem wishes to enter power saving state */
+		gsm_control_reply(gsm, CMD_PSC, NULL, 0);
+		break;
+		/* Optional unsupported commands */
+	case CMD_PN:	/* Parameter negotiation */
+	case CMD_RPN:	/* Remote port negotiation */
+	case CMD_SNC:	/* Service negotiation command */
+	default:
+		/* Reply to bad commands with an NSC */
+		buf[0] = command;
+		gsm_control_reply(gsm, CMD_NSC, buf, 1);
+		break;
+	}
+}
+
+/**
+ *	gsm_control_response	-	process a response to our control
+ *	@gsm: our GSM mux
+ *	@command: the command (response) EA
+ *	@data: data beyond the command/length EA
+ *	@clen: length
+ *
+ *	Process a response to an outstanding command. We only allow a single
+ *	control message in flight so this is fairly easy. All the clean up
+ *	is done by the caller, we just update the fields, flag it as done
+ *	and return
+ */
+
+static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
+							u8 *data, int clen)
+{
+	struct gsm_control *ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gsm->control_lock, flags);
+
+	ctrl = gsm->pending_cmd;
+	/* Does the reply match our command */
+	command |= 1;
+	if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) {
+		/* Our command was replied to, kill the retry timer */
+		del_timer(&gsm->t2_timer);
+		gsm->pending_cmd = NULL;
+		/* Rejected by the other end */
+		if (command == CMD_NSC)
+			ctrl->error = -EOPNOTSUPP;
+		ctrl->done = 1;
+		wake_up(&gsm->event);
+	}
+	spin_unlock_irqrestore(&gsm->control_lock, flags);
+}
+
+/**
+ *	gsm_control_transmit 	-	send control packet
+ *	@gsm: gsm mux
+ *	@ctrl: frame to send
+ *
+ *	Send out a pending control command (called under control lock)
+ */
+
+static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
+{
+	struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1,
+							gsm->ftype|PF);
+	if (msg == NULL)
+		return;
+	msg->data[0] = (ctrl->cmd << 1) | 2 | EA;	/* command */
+	memcpy(msg->data + 1, ctrl->data, ctrl->len);
+	gsm_data_queue(gsm->dlci[0], msg);
+}
+
+/**
+ *	gsm_control_retransmit	-	retransmit a control frame
+ *	@data: pointer to our gsm object
+ *
+ *	Called off the T2 timer expiry in order to retransmit control frames
+ *	that have been lost in the system somewhere. The control_lock protects
+ *	us from colliding with another sender or a receive completion event.
+ *	In that situation the timer may still fire in a small window but
+ *	gsm->pending_cmd will be NULL and we just let the timer expire.
+ */
+
+static void gsm_control_retransmit(unsigned long data)
+{
+	struct gsm_mux *gsm = (struct gsm_mux *)data;
+	struct gsm_control *ctrl;
+	unsigned long flags;
+	spin_lock_irqsave(&gsm->control_lock, flags);
+	ctrl = gsm->pending_cmd;
+	if (ctrl) {
+		gsm->cretries--;
+		if (gsm->cretries == 0) {
+			gsm->pending_cmd = NULL;
+			ctrl->error = -ETIMEDOUT;
+			ctrl->done = 1;
+			spin_unlock_irqrestore(&gsm->control_lock, flags);
+			wake_up(&gsm->event);
+			return;
+		}
+		gsm_control_transmit(gsm, ctrl);
+		mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
+	}
+	spin_unlock_irqrestore(&gsm->control_lock, flags);
+}
+
+/**
+ *	gsm_control_send	-	send a control frame on DLCI 0
+ *	@gsm: the GSM channel
+ *	@command: command  to send including CR bit
+ *	@data: bytes of data (must be kmalloced)
+ *	@len: length of the block to send
+ *
+ *	Queue and dispatch a control command. Only one command can be
+ *	active at a time. In theory more can be outstanding but the matching
+ *	gets really complicated so for now stick to one outstanding.
+ */
+
+static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
+		unsigned int command, u8 *data, int clen)
+{
+	struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control),
+						GFP_KERNEL);
+	unsigned long flags;
+	if (ctrl == NULL)
+		return NULL;
+retry:
+	wait_event(gsm->event, gsm->pending_cmd == NULL);
+	spin_lock_irqsave(&gsm->control_lock, flags);
+	if (gsm->pending_cmd != NULL) {
+		spin_unlock_irqrestore(&gsm->control_lock, flags);
+		goto retry;
+	}
+	ctrl->cmd = command;
+	ctrl->data = data;
+	ctrl->len = clen;
+	gsm->pending_cmd = ctrl;
+	gsm->cretries = gsm->n2;
+	mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
+	gsm_control_transmit(gsm, ctrl);
+	spin_unlock_irqrestore(&gsm->control_lock, flags);
+	return ctrl;
+}
+
+/**
+ *	gsm_control_wait	-	wait for a control to finish
+ *	@gsm: GSM mux
+ *	@control: control we are waiting on
+ *
+ *	Waits for the control to complete or time out. Frees any used
+ *	resources and returns 0 for success, or an error if the remote
+ *	rejected or ignored the request.
+ */
+
+static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
+{
+	int err;
+	wait_event(gsm->event, control->done == 1);
+	err = control->error;
+	kfree(control);
+	return err;
+}
+
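+/*
+ *	Typical use of the pair above (see gsmtty_modem_update() further
+ *	down for the in-tree caller) is, roughly:
+ *
+ *		ctrl = gsm_control_send(gsm, CMD_MSC, modembits, len + 1);
+ *		if (ctrl == NULL)
+ *			return -ENOMEM;
+ *		return gsm_control_wait(gsm, ctrl);
+ */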
+
+/*
+ *	DLCI level handling: Needs krefs
+ */
+
+/*
+ *	State transitions and timers
+ */
+
+/**
+ *	gsm_dlci_close		-	a DLCI has closed
+ *	@dlci: DLCI that closed
+ *
+ *	Perform processing when moving a DLCI into closed state. If there
+ *	is an attached tty this is hung up
+ */
+
+static void gsm_dlci_close(struct gsm_dlci *dlci)
+{
+	del_timer(&dlci->t1);
+	if (debug & 8)
+		printk("DLCI %d goes closed.\n", dlci->addr);
+	dlci->state = DLCI_CLOSED;
+	if (dlci->addr != 0) {
+		struct tty_struct  *tty = tty_port_tty_get(&dlci->port);
+		if (tty) {
+			tty_hangup(tty);
+			tty_kref_put(tty);
+		}
+		kfifo_reset(dlci->fifo);
+	} else
+		dlci->gsm->dead = 1;
+	wake_up(&dlci->gsm->event);
+	/* A DLCI 0 close is a MUX termination so we need to kick that
+	   back to userspace somehow */
+}
+
+/**
+ *	gsm_dlci_open		-	a DLCI has opened
+ *	@dlci: DLCI that opened
+ *
+ *	Perform processing when moving a DLCI into open state.
+ */
+
+static void gsm_dlci_open(struct gsm_dlci *dlci)
+{
+	/* Note that SABM UA .. SABM UA first UA lost can mean that we go
+	   open -> open */
+	del_timer(&dlci->t1);
+	/* This will let a tty open continue */
+	dlci->state = DLCI_OPEN;
+	if (debug & 8)
+		printk("DLCI %d goes open.\n", dlci->addr);
+	wake_up(&dlci->gsm->event);
+}
+
+/**
+ *	gsm_dlci_t1		-	T1 timer expiry
+ *	@data: pointer to our DLCI
+ *
+ *	The T1 timer handles retransmits of control frames (essentially of
+ *	SABM and DISC). We resend the command until the retry count runs out
+ *	in which case an opening port goes back to closed and a closing port
+ *	is simply put into closed state (any further frames from the other
+ *	end will get a DM response)
+ */
+
+static void gsm_dlci_t1(unsigned long data)
+{
+	struct gsm_dlci *dlci = (struct gsm_dlci *)data;
+	struct gsm_mux *gsm = dlci->gsm;
+
+	switch (dlci->state) {
+	case DLCI_OPENING:
+		dlci->retries--;
+		if (dlci->retries) {
+			gsm_command(dlci->gsm, dlci->addr, SABM|PF);
+			mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+		} else
+			gsm_dlci_close(dlci);
+		break;
+	case DLCI_CLOSING:
+		dlci->retries--;
+		if (dlci->retries) {
+			gsm_command(dlci->gsm, dlci->addr, DISC|PF);
+			mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+		} else
+			gsm_dlci_close(dlci);
+		break;
+	}
+}
+
+/**
+ *	gsm_dlci_begin_open	-	start channel open procedure
+ *	@dlci: DLCI to open
+ *
+ *	Commence opening a DLCI from the Linux side. We issue SABM messages
+ *	to the modem which should then reply with a UA, at which point we
+ *	will move into open state. Opening is done asynchronously with retry
+ *	running off timers and the responses.
+ */
+
+static void gsm_dlci_begin_open(struct gsm_dlci *dlci)
+{
+	struct gsm_mux *gsm = dlci->gsm;
+	if (dlci->state == DLCI_OPEN || dlci->state == DLCI_OPENING)
+		return;
+	dlci->retries = gsm->n2;
+	dlci->state = DLCI_OPENING;
+	gsm_command(dlci->gsm, dlci->addr, SABM|PF);
+	mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+}
+
+/**
+ *	gsm_dlci_begin_close	-	start channel close procedure
+ *	@dlci: DLCI to close
+ *
+ *	Commence closing a DLCI from the Linux side. We issue DISC messages
+ *	to the modem which should then reply with a UA, at which point we
+ *	will move into closed state. Closing is done asynchronously with retry
+ *	off timers. We may also receive a DM reply from the other end which
+ *	indicates the channel was already closed.
+ */
+
+static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
+{
+	struct gsm_mux *gsm = dlci->gsm;
+	if (dlci->state == DLCI_CLOSED || dlci->state == DLCI_CLOSING)
+		return;
+	dlci->retries = gsm->n2;
+	dlci->state = DLCI_CLOSING;
+	gsm_command(dlci->gsm, dlci->addr, DISC|PF);
+	mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+}
+
+/**
+ *	gsm_dlci_data		-	data arrived
+ *	@dlci: channel
+ *	@data: block of bytes received
+ *	@len: length of received block
+ *
+ *	A UI or UIH frame has arrived which contains data for a channel
+ *	other than the control channel. If the relevant virtual tty is
+ *	open we shovel the bits down it, if not we drop them.
+ */
+
+static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len)
+{
+	/* krefs .. */
+	struct tty_port *port = &dlci->port;
+	struct tty_struct *tty = tty_port_tty_get(port);
+	unsigned int modem = 0;
+
+	if (debug & 16)
+		printk("%d bytes for tty %p\n", len, tty);
+	if (tty) {
+		switch (dlci->adaption) {
+		/* Unsupported types */
+		/* Packetised interruptible data */
+		case 4:
+			break;
+		/* Packetised uninterruptible voice/data */
+		case 3:
+			break;
+		/* Asynchronous serial with line state in each frame */
+		case 2:
+			while (gsm_read_ea(&modem, *data++) == 0) {
+				len--;
+				if (len == 0)
+					return;
+			}
+			len--;	/* Account for the final EA octet read above */
+			gsm_process_modem(tty, dlci, modem);
+			/* Fall through - the remaining bytes are the data */
+		/* Line state will go via DLCI 0 controls only */
+		case 1:
+		default:
+			tty_insert_flip_string(tty, data, len);
+			tty_flip_buffer_push(tty);
+		}
+		tty_kref_put(tty);
+	}
+}
+
+/**
+ *	gsm_dlci_command	-	data arrived on control channel
+ *	@dlci: channel
+ *	@data: block of bytes received
+ *	@len: length of received block
+ *
+ *	A UI or UIH frame has arrived which contains data for DLCI 0 the
+ *	control channel. This should contain a command EA followed by
+ *	control data bytes. The command EA contains a command/response bit
+ *	and we divide up the work accordingly.
+ */
+
+static void gsm_dlci_command(struct gsm_dlci *dlci, u8 *data, int len)
+{
+	/* See what command is involved */
+	unsigned int command = 0;
+	while (len-- > 0) {
+		if (gsm_read_ea(&command, *data++) == 1) {
+			int clen = *data++;
+			len--;
+			/* FIXME: this is properly an EA */
+			clen >>= 1;
+			/* Malformed command ? */
+			if (clen > len)
+				return;
+			if (command & 1)
+				gsm_control_message(dlci->gsm, command,
+								data, clen);
+			else
+				gsm_control_response(dlci->gsm, command,
+								data, clen);
+			return;
+		}
+	}
+}
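+
+/*
+ *	For reference, the DLCI 0 payload decoded above is laid out as
+ *
+ *		<command EA octet(s)> <length octet> <clen data bytes>
+ *
+ *	The FIXME above notes that the length is currently read as a single
+ *	octet rather than as a full EA sequence.
+ */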
+
+/*
+ *	Allocate/Free DLCI channels
+ */
+
+/**
+ *	gsm_dlci_alloc		-	allocate a DLCI
+ *	@gsm: GSM mux
+ *	@addr: address of the DLCI
+ *
+ *	Allocate and install a new DLCI object into the GSM mux.
+ *
+ *	FIXME: review locking races
+ */
+
+static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
+{
+	struct gsm_dlci *dlci = kzalloc(sizeof(struct gsm_dlci), GFP_ATOMIC);
+	if (dlci == NULL)
+		return NULL;
+	spin_lock_init(&dlci->lock);
+	dlci->fifo = &dlci->_fifo;
+	if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
+		kfree(dlci);
+		return NULL;
+	}
+
+	skb_queue_head_init(&dlci->skb_list);
+	init_timer(&dlci->t1);
+	dlci->t1.function = gsm_dlci_t1;
+	dlci->t1.data = (unsigned long)dlci;
+	tty_port_init(&dlci->port);
+	dlci->port.ops = &gsm_port_ops;
+	dlci->gsm = gsm;
+	dlci->addr = addr;
+	dlci->adaption = gsm->adaption;
+	dlci->state = DLCI_CLOSED;
+	if (addr)
+		dlci->data = gsm_dlci_data;
+	else
+		dlci->data = gsm_dlci_command;
+	gsm->dlci[addr] = dlci;
+	return dlci;
+}
+
+/**
+ *	gsm_dlci_free		-	release DLCI
+ *	@dlci: DLCI to destroy
+ *
+ *	Free up a DLCI. Currently to keep the lifetime rules sane we only
+ *	clean up DLCI objects when the MUX closes rather than as the port
+ *	is closed down on both the tty and mux levels.
+ *
+ *	Can sleep.
+ */
+static void gsm_dlci_free(struct gsm_dlci *dlci)
+{
+	struct tty_struct *tty = tty_port_tty_get(&dlci->port);
+	if (tty) {
+		tty_vhangup(tty);
+		tty_kref_put(tty);
+	}
+	del_timer_sync(&dlci->t1);
+	dlci->gsm->dlci[dlci->addr] = NULL;
+	kfifo_free(dlci->fifo);
+	kfree(dlci);
+}
+
+
+/*
+ *	LAPBish link layer logic
+ */
+
+/**
+ *	gsm_queue		-	a GSM frame is ready to process
+ *	@gsm: pointer to our gsm mux
+ *
+ *	At this point in time a frame has arrived and been demangled from
+ *	the line encoding. All the differences between the encodings have
+ *	been handled below us and the frame is unpacked into the structures.
+ *	The fcs holds the header FCS but any data FCS must be added here.
+ */
+
+static void gsm_queue(struct gsm_mux *gsm)
+{
+	struct gsm_dlci *dlci;
+	u8 cr;
+	int address;
+	/* We have to sneak a look at the packet body to do the FCS.
+	   Something of a layering violation in the spec */
+
+	if ((gsm->control & ~PF) == UI)
+		gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
+	if (gsm->fcs != GOOD_FCS) {
+		gsm->bad_fcs++;
+		if (debug & 4)
+			printk("BAD FCS %02x\n", gsm->fcs);
+		return;
+	}
+	address = gsm->address >> 1;
+	if (address >= NUM_DLCI)
+		goto invalid;
+
+	cr = gsm->address & 1;		/* C/R bit */
+
+	gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len);
+
+	cr ^= 1 - gsm->initiator;	/* Flip so 1 always means command */
+	dlci = gsm->dlci[address];
+
+	switch (gsm->control) {
+	case SABM|PF:
+		if (cr == 0)
+			goto invalid;
+		if (dlci == NULL)
+			dlci = gsm_dlci_alloc(gsm, address);
+		if (dlci == NULL)
+			return;
+		if (dlci->dead)
+			gsm_response(gsm, address, DM);
+		else {
+			gsm_response(gsm, address, UA);
+			gsm_dlci_open(dlci);
+		}
+		break;
+	case DISC|PF:
+		if (cr == 0)
+			goto invalid;
+		if (dlci == NULL || dlci->state == DLCI_CLOSED) {
+			gsm_response(gsm, address, DM);
+			return;
+		}
+		/* Real close complete */
+		gsm_response(gsm, address, UA);
+		gsm_dlci_close(dlci);
+		break;
+	case UA:
+	case UA|PF:
+		if (cr == 0 || dlci == NULL)
+			break;
+		switch (dlci->state) {
+		case DLCI_CLOSING:
+			gsm_dlci_close(dlci);
+			break;
+		case DLCI_OPENING:
+			gsm_dlci_open(dlci);
+			break;
+		}
+		break;
+	case DM:	/* DM can be valid unsolicited */
+	case DM|PF:
+		if (cr)
+			goto invalid;
+		if (dlci == NULL)
+			return;
+		gsm_dlci_close(dlci);
+		break;
+	case UI:
+	case UI|PF:
+	case UIH:
+	case UIH|PF:
+#if 0
+		if (cr)
+			goto invalid;
+#endif
+		if (dlci == NULL || dlci->state != DLCI_OPEN) {
+			gsm_command(gsm, address, DM|PF);
+			return;
+		}
+		dlci->data(dlci, gsm->buf, gsm->len);
+		break;
+	default:
+		goto invalid;
+	}
+	return;
+invalid:
+	gsm->malformed++;
+	return;
+}
+
+
+/**
+ *	gsm0_receive	-	perform processing for non-transparency
+ *	@gsm: gsm data for this ldisc instance
+ *	@c: character
+ *
+ *	Receive bytes in gsm mode 0
+ */
+
+static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+{
+	switch (gsm->state) {
+	case GSM_SEARCH:	/* SOF marker */
+		if (c == GSM0_SOF) {
+			gsm->state = GSM_ADDRESS;
+			gsm->address = 0;
+			gsm->len = 0;
+			gsm->fcs = INIT_FCS;
+		}
+		break;		/* Address EA */
+	case GSM_ADDRESS:
+		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+		if (gsm_read_ea(&gsm->address, c))
+			gsm->state = GSM_CONTROL;
+		break;
+	case GSM_CONTROL:	/* Control Byte */
+		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+		gsm->control = c;
+		gsm->state = GSM_LEN;
+		break;
+	case GSM_LEN:		/* Length EA */
+		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+		if (gsm_read_ea(&gsm->len, c)) {
+			if (gsm->len > gsm->mru) {
+				gsm->bad_size++;
+				gsm->state = GSM_SEARCH;
+				break;
+			}
+			gsm->count = 0;
+			gsm->state = GSM_DATA;
+		}
+		break;
+	case GSM_DATA:		/* Data */
+		gsm->buf[gsm->count++] = c;
+		if (gsm->count == gsm->len)
+			gsm->state = GSM_FCS;
+		break;
+	case GSM_FCS:		/* FCS follows the packet */
+		gsm->fcs = c;
+		gsm_queue(gsm);
+		/* And then back for the next frame */
+		gsm->state = GSM_SEARCH;
+		break;
+	}
+}
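+
+/*
+ *	Frame layout implied by the state machine above (basic option):
+ *
+ *		flag (GSM0_SOF) | address EA | control | length EA | data | FCS
+ */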
+
+/**
+ *	gsm1_receive	-	perform processing for the advanced option
+ *	@gsm: gsm data for this ldisc instance
+ *	@c: character
+ *
+ *	Receive bytes in mode 1 (Advanced option)
+ */
+
+static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
+{
+	if (c == GSM1_SOF) {
+		/* EOF is only valid in frame if we have got to the data state
+		   and received at least one byte (the FCS) */
+		if (gsm->state == GSM_DATA && gsm->count) {
+			/* Extract the FCS */
+			gsm->count--;
+			gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
+			gsm->len = gsm->count;
+			gsm_queue(gsm);
+			gsm->state  = GSM_START;
+			return;
+		}
+		/* Any partial frame was a runt so go back to start */
+		if (gsm->state != GSM_START) {
+			gsm->malformed++;
+			gsm->state = GSM_START;
+		}
+		/* A SOF in GSM_START means we are still reading idling or
+		   framing bytes */
+		return;
+	}
+
+	if (c == GSM1_ESCAPE) {
+		gsm->escape = 1;
+		return;
+	}
+
+	/* Only an unescaped SOF gets us out of GSM search */
+	if (gsm->state == GSM_SEARCH)
+		return;
+
+	if (gsm->escape) {
+		c ^= GSM1_ESCAPE_BITS;
+		gsm->escape = 0;
+	}
+	switch (gsm->state) {
+	case GSM_START:		/* First byte after SOF */
+		gsm->address = 0;
+		gsm->state = GSM_ADDRESS;
+		gsm->fcs = INIT_FCS;
+		/* Drop through */
+	case GSM_ADDRESS:	/* Address continuation */
+		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+		if (gsm_read_ea(&gsm->address, c))
+			gsm->state = GSM_CONTROL;
+		break;
+	case GSM_CONTROL:	/* Control Byte */
+		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+		gsm->control = c;
+		gsm->count = 0;
+		gsm->state = GSM_DATA;
+		break;
+	case GSM_DATA:		/* Data */
+		if (gsm->count > gsm->mru) {	/* Allow one for the FCS */
+			gsm->state = GSM_OVERRUN;
+			gsm->bad_size++;
+		} else
+			gsm->buf[gsm->count++] = c;
+		break;
+	case GSM_OVERRUN:	/* Over-long - eg a dropped SOF */
+		break;
+	}
+}
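+
+/*
+ *	In the advanced option handled above the frame is simply
+ *
+ *		flag (GSM1_SOF) | address EA | control | data | FCS | flag
+ *
+ *	with GSM1_ESCAPE/GSM1_ESCAPE_BITS byte stuffing applied to the body.
+ *	There is no length field; the FCS is recovered as the last byte
+ *	before the closing flag.
+ */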
+
+/**
+ *	gsm_error		-	handle tty error
+ *	@gsm: ldisc data
+ *	@data: byte received (may be invalid)
+ *	@flag: error received
+ *
+ *	Handle an error in the receipt of data for a frame. Currently we just
+ *	go back to hunting for a SOF.
+ *
+ *	FIXME: better diagnostics ?
+ */
+
+static void gsm_error(struct gsm_mux *gsm,
+				unsigned char data, unsigned char flag)
+{
+	gsm->state = GSM_SEARCH;
+	gsm->io_error++;
+}
+
+/**
+ *	gsm_cleanup_mux		-	generic GSM protocol cleanup
+ *	@gsm: our mux
+ *
+ *	Clean up the bits of the mux which are the same for all framing
+ *	protocols. Remove the mux from the mux table, stop all the timers
+ *	and then shut down each device hanging up the channels as we go.
+ */
+
+void gsm_cleanup_mux(struct gsm_mux *gsm)
+{
+	int i;
+	struct gsm_dlci *dlci = gsm->dlci[0];
+	struct gsm_msg *txq;
+
+	gsm->dead = 1;
+
+	spin_lock(&gsm_mux_lock);
+	for (i = 0; i < MAX_MUX; i++) {
+		if (gsm_mux[i] == gsm) {
+			gsm_mux[i] = NULL;
+			break;
+		}
+	}
+	spin_unlock(&gsm_mux_lock);
+	WARN_ON(i == MAX_MUX);
+
+	del_timer_sync(&gsm->t2_timer);
+	/* Now we are sure T2 has stopped */
+	if (dlci) {
+		dlci->dead = 1;
+		gsm_dlci_begin_close(dlci);
+		wait_event_interruptible(gsm->event,
+					dlci->state == DLCI_CLOSED);
+	}
+	/* Free up any link layer users */
+	for (i = 0; i < NUM_DLCI; i++)
+		if (gsm->dlci[i])
+			gsm_dlci_free(gsm->dlci[i]);
+	/* Now wipe the queues */
+	for (txq = gsm->tx_head; txq != NULL; txq = gsm->tx_head) {
+		gsm->tx_head = txq->next;
+		kfree(txq);
+	}
+	gsm->tx_tail = NULL;
+}
+EXPORT_SYMBOL_GPL(gsm_cleanup_mux);
+
+/**
+ *	gsm_activate_mux	-	generic GSM setup
+ *	@gsm: our mux
+ *
+ *	Set up the bits of the mux which are the same for all framing
+ *	protocols. Add the mux to the mux table so it can be opened and
+ *	finally kick off connecting to DLCI 0 on the modem.
+ */
+
+int gsm_activate_mux(struct gsm_mux *gsm)
+{
+	struct gsm_dlci *dlci;
+	int i = 0;
+
+	init_timer(&gsm->t2_timer);
+	gsm->t2_timer.function = gsm_control_retransmit;
+	gsm->t2_timer.data = (unsigned long)gsm;
+	init_waitqueue_head(&gsm->event);
+	spin_lock_init(&gsm->control_lock);
+	spin_lock_init(&gsm->tx_lock);
+
+	if (gsm->encoding == 0)
+		gsm->receive = gsm0_receive;
+	else
+		gsm->receive = gsm1_receive;
+	gsm->error = gsm_error;
+
+	spin_lock(&gsm_mux_lock);
+	for (i = 0; i < MAX_MUX; i++) {
+		if (gsm_mux[i] == NULL) {
+			gsm_mux[i] = gsm;
+			break;
+		}
+	}
+	spin_unlock(&gsm_mux_lock);
+	if (i == MAX_MUX)
+		return -EBUSY;
+
+	dlci = gsm_dlci_alloc(gsm, 0);
+	if (dlci == NULL)
+		return -ENOMEM;
+	gsm->dead = 0;		/* Tty opens are now permissible */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(gsm_activate_mux);
+
+/**
+ *	gsm_free_mux		-	free up a mux
+ *	@mux: mux to free
+ *
+ *	Dispose of allocated resources for a dead mux. No refcounting
+ *	at present so the mux must be truly dead.
+ */
+void gsm_free_mux(struct gsm_mux *gsm)
+{
+	kfree(gsm->txframe);
+	kfree(gsm->buf);
+	kfree(gsm);
+}
+EXPORT_SYMBOL_GPL(gsm_free_mux);
+
+/**
+ *	gsm_alloc_mux		-	allocate a mux
+ *
+ *	Creates a new mux ready for activation.
+ */
+
+struct gsm_mux *gsm_alloc_mux(void)
+{
+	struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL);
+	if (gsm == NULL)
+		return NULL;
+	gsm->buf = kmalloc(MAX_MRU + 1, GFP_KERNEL);
+	if (gsm->buf == NULL) {
+		kfree(gsm);
+		return NULL;
+	}
+	gsm->txframe = kmalloc(2 * MAX_MRU + 2, GFP_KERNEL);
+	if (gsm->txframe == NULL) {
+		kfree(gsm->buf);
+		kfree(gsm);
+		return NULL;
+	}
+	spin_lock_init(&gsm->lock);
+
+	gsm->t1 = T1;
+	gsm->t2 = T2;
+	gsm->n2 = N2;
+	gsm->ftype = UIH;
+	gsm->initiator = 0;
+	gsm->adaption = 1;
+	gsm->encoding = 1;
+	gsm->mru = 64;	/* Default to encoding 1 so these should be 64 */
+	gsm->mtu = 64;
+	gsm->dead = 1;	/* Avoid early tty opens */
+
+	return gsm;
+}
+EXPORT_SYMBOL_GPL(gsm_alloc_mux);
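+
+/*
+ *	Note: t1 and t2 above are held in hundredths of a second; every
+ *	timer arming in this file scales them as jiffies + t * HZ / 100.
+ */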
+
+
+
+
+/**
+ *	gsmld_output		-	write to link
+ *	@gsm: our mux
+ *	@data: bytes to output
+ *	@len: size
+ *
+ *	Write a block of data from the GSM mux to the data channel. This
+ *	will eventually be serialized from above but at the moment isn't.
+ */
+
+static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
+{
+	if (tty_write_room(gsm->tty) < len) {
+		set_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);
+		return -ENOSPC;
+	}
+	if (debug & 4) {
+		printk("-->%d bytes out\n", len);
+		hex_packet(data, len);
+	}
+	gsm->tty->ops->write(gsm->tty, data, len);
+	return len;
+}
+
+/**
+ *	gsmld_attach_gsm	-	mode set up
+ *	@tty: our tty structure
+ *	@gsm: our mux
+ *
+ *	Set up the MUX for basic mode and commence connecting to the
+ *	modem. Currently called from the line discipline set up but
+ *	will need moving to an ioctl path.
+ */
+
+static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+{
+	int ret;
+
+	gsm->tty = tty_kref_get(tty);
+	gsm->output = gsmld_output;
+	ret =  gsm_activate_mux(gsm);
+	if (ret != 0)
+		tty_kref_put(gsm->tty);
+	return ret;
+}
+
+
+/**
+ *	gsmld_detach_gsm	-	stop doing 0710 mux
+ *	@tty: tty attached to the mux
+ *	@gsm: mux
+ *
+ *	Shutdown and then clean up the resources used by the line discipline
+ */
+
+static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+{
+	WARN_ON(tty != gsm->tty);
+	gsm_cleanup_mux(gsm);
+	tty_kref_put(gsm->tty);
+	gsm->tty = NULL;
+}
+
+static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+			      char *fp, int count)
+{
+	struct gsm_mux *gsm = tty->disc_data;
+	const unsigned char *dp;
+	char *f;
+	int i;
+	char buf[64];
+	char flags;
+
+	if (debug & 4) {
+		printk("Inbytes %d\n", count);
+		hex_packet(cp, count);
+	}
+
+	for (i = count, dp = cp, f = fp; i; i--, dp++) {
+		flags = *f++;
+		switch (flags) {
+		case TTY_NORMAL:
+			gsm->receive(gsm, *dp);
+			break;
+		case TTY_OVERRUN:
+		case TTY_BREAK:
+		case TTY_PARITY:
+		case TTY_FRAME:
+			gsm->error(gsm, *dp, flags);
+			break;
+		default:
+			printk(KERN_ERR "%s: unknown flag %d\n",
+			       tty_name(tty, buf), flags);
+			break;
+		}
+	}
+	/* FASYNC if needed ? */
+	/* If clogged call tty_throttle(tty); */
+}
+
+/**
+ *	gsmld_chars_in_buffer	-	report available bytes
+ *	@tty: tty device
+ *
+ *	Report the number of characters buffered to be delivered to user
+ *	at this instant in time.
+ *
+ *	Locking: gsm lock
+ */
+
+static ssize_t gsmld_chars_in_buffer(struct tty_struct *tty)
+{
+	return 0;
+}
+
+/**
+ *	gsmld_flush_buffer	-	clean input queue
+ *	@tty:	terminal device
+ *
+ *	Flush the input buffer. Called when the line discipline is
+ *	being closed, when the tty layer wants the buffer flushed (eg
+ *	at hangup).
+ */
+
+static void gsmld_flush_buffer(struct tty_struct *tty)
+{
+}
+
+/**
+ *	gsmld_close		-	close the ldisc for this tty
+ *	@tty: device
+ *
+ *	Called from the terminal layer when this line discipline is
+ *	being shut down, either because of a close or becsuse of a
+ *	being shut down, either because of a close or because of a
+ *	ldisc methods are in progress.
+ */
+
+static void gsmld_close(struct tty_struct *tty)
+{
+	struct gsm_mux *gsm = tty->disc_data;
+
+	gsmld_detach_gsm(tty, gsm);
+
+	gsmld_flush_buffer(tty);
+	/* Do other clean up here */
+	gsm_free_mux(gsm);
+}
+
+/**
+ *	gsmld_open		-	open an ldisc
+ *	@tty: terminal to open
+ *
+ *	Called when this line discipline is being attached to the
+ *	terminal device. Can sleep. Called serialized so that no
+ *	other events will occur in parallel. No further open will occur
+ *	until a close.
+ */
+
+static int gsmld_open(struct tty_struct *tty)
+{
+	struct gsm_mux *gsm;
+
+	if (tty->ops->write == NULL)
+		return -EINVAL;
+
+	/* Attach our ldisc data */
+	gsm = gsm_alloc_mux();
+	if (gsm == NULL)
+		return -ENOMEM;
+
+	tty->disc_data = gsm;
+	tty->receive_room = 65536;
+
+	/* Attach the initial passive connection */
+	gsm->encoding = 1;
+	return gsmld_attach_gsm(tty, gsm);
+}
+
+/**
+ *	gsmld_write_wakeup	-	asynchronous I/O notifier
+ *	@tty: tty device
+ *
+ *	Required for the ptys, serial driver etc. since processes
+ *	that attach themselves to the master and rely on ASYNC
+ *	IO must be woken up
+ */
+
+static void gsmld_write_wakeup(struct tty_struct *tty)
+{
+	struct gsm_mux *gsm = tty->disc_data;
+
+	/* Queue poll */
+	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+	gsm_data_kick(gsm);
+	if (gsm->tx_bytes < TX_THRESH_LO)
+		gsm_dlci_data_sweep(gsm);
+}
+
+/**
+ *	gsmld_read		-	read function for tty
+ *	@tty: tty device
+ *	@file: file object
+ *	@buf: userspace buffer pointer
+ *	@nr: size of I/O
+ *
+ *	Perform reads for the line discipline. We are guaranteed that the
+ *	line discipline will not be closed under us but we may get multiple
+ *	parallel readers and must handle this ourselves. We may also get
+ *	a hangup. Always called in user context, may sleep.
+ *
+ *	This code must be sure never to sleep through a hangup.
+ */
+
+static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
+			 unsigned char __user *buf, size_t nr)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ *	gsmld_write		-	write function for tty
+ *	@tty: tty device
+ *	@file: file object
+ *	@buf: userspace buffer pointer
+ *	@nr: size of I/O
+ *
+ *	Called when the owner of the device wants to send a frame
+ *	itself (or some other control data). The data is transferred
+ *	as-is and must be properly framed and checksummed as appropriate
+ *	by userspace. Frames are either sent whole or not at all as this
+ *	avoids pain on the user side.
+ */
+
+static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
+			   const unsigned char *buf, size_t nr)
+{
+	int space = tty_write_room(tty);
+	if (space >= nr)
+		return tty->ops->write(tty, buf, nr);
+	set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+	return -ENOBUFS;
+}
+
+/**
+ *	gsmld_poll		-	poll method for N_GSM0710
+ *	@tty: terminal device
+ *	@file: file accessing it
+ *	@wait: poll table
+ *
+ *	Called when the line discipline is asked to poll() for data or
+ *	for special events. This code is not serialized with respect to
+ *	other events save open/close.
+ *
+ *	This code must be sure never to sleep through a hangup.
+ *	Called without the kernel lock held - fine
+ */
+
+static unsigned int gsmld_poll(struct tty_struct *tty, struct file *file,
+							poll_table *wait)
+{
+	unsigned int mask = 0;
+	struct gsm_mux *gsm = tty->disc_data;
+
+	poll_wait(file, &tty->read_wait, wait);
+	poll_wait(file, &tty->write_wait, wait);
+	if (tty_hung_up_p(file))
+		mask |= POLLHUP;
+	if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0)
+		mask |= POLLOUT | POLLWRNORM;
+	if (gsm->dead)
+		mask |= POLLHUP;
+	return mask;
+}
+
+static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
+							struct gsm_config *c)
+{
+	int need_close = 0;
+	int need_restart = 0;
+
+	/* Stuff we don't support yet - UI or I frame transport, windowing */
+	if ((c->adaption != 1 && c->adaption != 2) || c->k)
+		return -EOPNOTSUPP;
+	/* Check the MRU/MTU range looks sane */
+	if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8)
+		return -EINVAL;
+	if (c->n2 < 3)
+		return -EINVAL;
+	if (c->encapsulation > 1)	/* Basic, advanced, no I */
+		return -EINVAL;
+	if (c->initiator > 1)
+		return -EINVAL;
+	if (c->i == 0 || c->i > 2)	/* UIH and UI only */
+		return -EINVAL;
+	/*
+	 *	See what is needed for reconfiguration
+	 */
+
+	/* Timing fields */
+	if (c->t1 != 0 && c->t1 != gsm->t1)
+		need_restart = 1;
+	if (c->t2 != 0 && c->t2 != gsm->t2)
+		need_restart = 1;
+	if (c->encapsulation != gsm->encoding)
+		need_restart = 1;
+	if (c->adaption != gsm->adaption)
+		need_restart = 1;
+	/* Requires care */
+	if (c->initiator != gsm->initiator)
+		need_close = 1;
+	if (c->mru != gsm->mru)
+		need_restart = 1;
+	if (c->mtu != gsm->mtu)
+		need_restart = 1;
+
+	/*
+	 *	Close down what is needed, restart and initiate the new
+	 *	configuration
+	 */
+
+	if (need_close || need_restart) {
+		gsm_dlci_begin_close(gsm->dlci[0]);
+		/* This will timeout if the link is down due to N2 expiring */
+		wait_event_interruptible(gsm->event,
+				gsm->dlci[0]->state == DLCI_CLOSED);
+		if (signal_pending(current))
+			return -EINTR;
+	}
+	if (need_restart)
+		gsm_cleanup_mux(gsm);
+
+	gsm->initiator = c->initiator;
+	gsm->mru = c->mru;
+	gsm->encoding = c->encapsulation;
+	gsm->adaption = c->adaption;
+
+	if (c->i == 1)
+		gsm->ftype = UIH;
+	else if (c->i == 2)
+		gsm->ftype = UI;
+
+	if (c->t1)
+		gsm->t1 = c->t1;
+	if (c->t2)
+		gsm->t2 = c->t2;
+
+	/* FIXME: We need to separate activation/deactivation from adding
+	   and removing from the mux array */
+	if (need_restart)
+		gsm_activate_mux(gsm);
+	if (gsm->initiator && need_close)
+		gsm_dlci_begin_open(gsm->dlci[0]);
+	return 0;
+}
+
+static int gsmld_ioctl(struct tty_struct *tty, struct file *file,
+		       unsigned int cmd, unsigned long arg)
+{
+	struct gsm_config c;
+	struct gsm_mux *gsm = tty->disc_data;
+
+	switch (cmd) {
+	case GSMIOC_GETCONF:
+		memset(&c, 0, sizeof(c));
+		c.adaption = gsm->adaption;
+		c.encapsulation = gsm->encoding;
+		c.initiator = gsm->initiator;
+		c.t1 = gsm->t1;
+		c.t2 = gsm->t2;
+		c.t3 = 0;	/* Not supported */
+		c.n2 = gsm->n2;
+		if (gsm->ftype == UIH)
+			c.i = 1;
+		else
+			c.i = 2;
+		printk("Ftype %d i %d\n", gsm->ftype, c.i);
+		c.mru = gsm->mru;
+		c.mtu = gsm->mtu;
+		c.k = 0;
+		if (copy_to_user((void __user *)arg, &c, sizeof(c)))
+			return -EFAULT;
+		return 0;
+	case GSMIOC_SETCONF:
+		if (copy_from_user(&c, (void __user *)arg, sizeof(c)))
+			return -EFAULT;
+		return gsmld_config(tty, gsm, &c);
+	default:
+		return n_tty_ioctl_helper(tty, file, cmd, arg);
+	}
+}
+
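+/*
+ *	Rough userspace sketch of driving the two ioctls above (it assumes
+ *	the GSMIOC_* constants and struct gsm_config are exported by the
+ *	mux header that accompanies this driver):
+ *
+ *		int ldisc = N_GSM0710;
+ *		struct gsm_config c;
+ *
+ *		ioctl(fd, TIOCSETD, &ldisc);	attach this line discipline
+ *		ioctl(fd, GSMIOC_GETCONF, &c);	read the current defaults
+ *		c.initiator = 1;		we will open DLCI 0 ourselves
+ *		ioctl(fd, GSMIOC_SETCONF, &c);	apply the new configuration
+ */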
+
+/* Line discipline for real tty */
+struct tty_ldisc_ops tty_ldisc_packet = {
+	.owner		 = THIS_MODULE,
+	.magic           = TTY_LDISC_MAGIC,
+	.name            = "n_gsm",
+	.open            = gsmld_open,
+	.close           = gsmld_close,
+	.flush_buffer    = gsmld_flush_buffer,
+	.chars_in_buffer = gsmld_chars_in_buffer,
+	.read            = gsmld_read,
+	.write           = gsmld_write,
+	.ioctl           = gsmld_ioctl,
+	.poll            = gsmld_poll,
+	.receive_buf     = gsmld_receive_buf,
+	.write_wakeup    = gsmld_write_wakeup
+};
+
+/*
+ *	Virtual tty side
+ */
+
+#define TX_SIZE		512
+
+static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
+{
+	u8 modembits[5];
+	struct gsm_control *ctrl;
+	int len = 2;
+
+	if (brk)
+		len++;
+
+	modembits[0] = len << 1 | EA;		/* Data bytes */
+	modembits[1] = dlci->addr << 2 | 3;	/* DLCI, EA, 1 */
+	modembits[2] = gsm_encode_modem(dlci) << 1 | EA;
+	if (brk)
+		modembits[3] = brk << 4 | 2 | EA;	/* Valid, EA */
+	ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len + 1);
+	if (ctrl == NULL)
+		return -ENOMEM;
+	return gsm_control_wait(dlci->gsm, ctrl);
+}
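+
+/*
+ *	Worked example of the block built above (taking EA as the 0x01 low
+ *	bit): for DLCI 1 with no break, len = 2, so the control data is
+ *
+ *		{ 2 << 1 | EA, 1 << 2 | 3, gsm_encode_modem(dlci) << 1 | EA }
+ *	      = { 0x05, 0x07, <modem bits> }
+ */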
+
+static int gsm_carrier_raised(struct tty_port *port)
+{
+	struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+	/* Not yet open so no carrier info */
+	if (dlci->state != DLCI_OPEN)
+		return 0;
+	if (debug & 2)
+		return 1;
+	return dlci->modem_rx & TIOCM_CD;
+}
+
+static void gsm_dtr_rts(struct tty_port *port, int onoff)
+{
+	struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+	unsigned int modem_tx = dlci->modem_tx;
+	if (onoff)
+		modem_tx |= TIOCM_DTR | TIOCM_RTS;
+	else
+		modem_tx &= ~(TIOCM_DTR | TIOCM_RTS);
+	if (modem_tx != dlci->modem_tx) {
+		dlci->modem_tx = modem_tx;
+		gsmtty_modem_update(dlci, 0);
+	}
+}
+
+static const struct tty_port_operations gsm_port_ops = {
+	.carrier_raised = gsm_carrier_raised,
+	.dtr_rts = gsm_dtr_rts,
+};
+
+
+static int gsmtty_open(struct tty_struct *tty, struct file *filp)
+{
+	struct gsm_mux *gsm;
+	struct gsm_dlci *dlci;
+	struct tty_port *port;
+	unsigned int line = tty->index;
+	unsigned int mux = line >> 6;
+
+	line = line & 0x3F;
+
+	if (mux >= MAX_MUX)
+		return -ENXIO;
+	/* FIXME: we need to lock gsm_mux for lifetimes of ttys eventually */
+	if (gsm_mux[mux] == NULL)
+		return -EUNATCH;
+	if (line == 0 || line > 61)	/* 62/63 reserved */
+		return -ECHRNG;
+	gsm = gsm_mux[mux];
+	if (gsm->dead)
+		return -EL2HLT;
+	dlci = gsm->dlci[line];
+	if (dlci == NULL)
+		dlci = gsm_dlci_alloc(gsm, line);
+	if (dlci == NULL)
+		return -ENOMEM;
+	port = &dlci->port;
+	port->count++;
+	tty->driver_data = dlci;
+	tty_port_tty_set(port, tty);
+
+	dlci->modem_rx = 0;
+	/* We could in theory open and close before we wait - eg if we get
+	   a DM straight back. This is ok as that will have caused a hangup */
+	set_bit(ASYNCB_INITIALIZED, &port->flags);
+	/* Start sending off SABM messages */
+	gsm_dlci_begin_open(dlci);
+	/* And wait for virtual carrier */
+	return tty_port_block_til_ready(port, tty, filp);
+}
+
+static void gsmtty_close(struct tty_struct *tty, struct file *filp)
+{
+	struct gsm_dlci *dlci = tty->driver_data;
+	if (dlci == NULL)
+		return;
+	if (tty_port_close_start(&dlci->port, tty, filp) == 0)
+		return;
+	gsm_dlci_begin_close(dlci);
+	tty_port_close_end(&dlci->port, tty);
+	tty_port_tty_set(&dlci->port, NULL);
+}
+
+static void gsmtty_hangup(struct tty_struct *tty)
+{
+	struct gsm_dlci *dlci = tty->driver_data;
+	tty_port_hangup(&dlci->port);
+	gsm_dlci_begin_close(dlci);
+}
+
+static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
+								    int len)
+{
+	struct gsm_dlci *dlci = tty->driver_data;
+	/* Stuff the bytes into the fifo queue */
+	int sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
+	/* Need to kick the channel */
+	gsm_dlci_data_kick(dlci);
+	return sent;
+}
+
+static int gsmtty_write_room(struct tty_struct *tty)
+{
+	struct gsm_dlci *dlci = tty->driver_data;
+	return TX_SIZE - kfifo_len(dlci->fifo);
+}
+
+static int gsmtty_chars_in_buffer(struct tty_struct *tty)
+{
+	struct gsm_dlci *dlci = tty->driver_data;
+	return kfifo_len(dlci->fifo);
+}
+
+static void gsmtty_flush_buffer(struct tty_struct *tty)
+{
+	struct gsm_dlci *dlci = tty->driver_data;
+	/* Caution needed: If we implement reliable transport classes
+	   then the data being transmitted can't simply be junked once
+	   it has first hit the stack. Until then we can just blow it
+	   away */
+	kfifo_reset(dlci->fifo);
+	/* Need to unhook this DLCI from the transmit queue logic */
+}
+
+static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+	/* The FIFO handles the queue so the kernel will do the right
+	   thing waiting on chars_in_buffer before calling us. No work
+	   to do here */
+}
+
+static int gsmtty_tiocmget(struct tty_struct *tty, struct file *filp)
+{
+	struct gsm_dlci *dlci = tty->driver_data;
+	return dlci->modem_rx;
+}
+
+static int gsmtty_tiocmset(struct tty_struct *tty, struct file *filp,
+	unsigned int set, unsigned int clear)
+{
+	struct gsm_dlci *dlci = tty->driver_data;
+	unsigned int modem_tx = dlci->modem_tx;
+
+	modem_tx &= ~clear;
+	modem_tx |= set;
+
+	if (modem_tx != dlci->modem_tx) {
+		dlci->modem_tx = modem_tx;
+		return gsmtty_modem_update(dlci, 0);
+	}
+	return 0;
+}
+
+
+static int gsmtty_ioctl(struct tty_struct *tty, struct file *filp,
+			unsigned int cmd, unsigned long arg)
+{
+	return -ENOIOCTLCMD;
+}
+
+static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
+{
+	/* For the moment it's fixed. In actual fact the speed information
+	   for the virtual channel can be propagated in both directions by
+	   the RPN control message. This however rapidly gets nasty as we
+	   then have to remap modem signals each way according to whether
+	   our virtual cable is null modem etc. */
+	tty_termios_copy_hw(tty->termios, old);
+}
+
+static void gsmtty_throttle(struct tty_struct *tty)
+{
+	struct gsm_dlci *dlci = tty->driver_data;
+	if (tty->termios->c_cflag & CRTSCTS)
+		dlci->modem_tx &= ~TIOCM_DTR;
+	dlci->throttled = 1;
+	/* Send an MSC with DTR cleared */
+	gsmtty_modem_update(dlci, 0);
+}
+
+static void gsmtty_unthrottle(struct tty_struct *tty)
+{
+	struct gsm_dlci *dlci = tty->driver_data;
+	if (tty->termios->c_cflag & CRTSCTS)
+		dlci->modem_tx |= TIOCM_DTR;
+	dlci->throttled = 0;
+	/* Send an MSC with DTR set */
+	gsmtty_modem_update(dlci, 0);
+}
+
+static int gsmtty_break_ctl(struct tty_struct *tty, int state)
+{
+	struct gsm_dlci *dlci = tty->driver_data;
+	int encode = 0;	/* Off */
+
+	if (state == -1)	/* "On indefinitely" - we can't encode this
+				    properly */
+		encode = 0x0F;
+	else if (state > 0) {
+		encode = state / 200;	/* mS to encoding */
+		if (encode > 0x0F)
+			encode = 0x0F;	/* Best effort */
+	}
+	return gsmtty_modem_update(dlci, encode);
+}
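+
+/*
+ *	Worked example for the encoding above: a requested 1000mS break maps
+ *	to 1000 / 200 = 5 units; anything beyond 3000mS, or an "on
+ *	indefinitely" request, is capped at the 0x0F maximum.
+ */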
+
+static struct tty_driver *gsm_tty_driver;
+
+/* Virtual ttys for the demux */
+static const struct tty_operations gsmtty_ops = {
+	.open			= gsmtty_open,
+	.close			= gsmtty_close,
+	.write			= gsmtty_write,
+	.write_room		= gsmtty_write_room,
+	.chars_in_buffer	= gsmtty_chars_in_buffer,
+	.flush_buffer		= gsmtty_flush_buffer,
+	.ioctl			= gsmtty_ioctl,
+	.throttle		= gsmtty_throttle,
+	.unthrottle		= gsmtty_unthrottle,
+	.set_termios		= gsmtty_set_termios,
+	.hangup			= gsmtty_hangup,
+	.wait_until_sent	= gsmtty_wait_until_sent,
+	.tiocmget		= gsmtty_tiocmget,
+	.tiocmset		= gsmtty_tiocmset,
+	.break_ctl		= gsmtty_break_ctl,
+};
+
+
+
+static int __init gsm_init(void)
+{
+	/* Fill in our line protocol discipline, and register it */
+	int status = tty_register_ldisc(N_GSM0710, &tty_ldisc_packet);
+	if (status != 0) {
+		printk(KERN_ERR "n_gsm: can't register line discipline (err = %d)\n", status);
+		return status;
+	}
+
+	gsm_tty_driver = alloc_tty_driver(256);
+	if (!gsm_tty_driver) {
+		tty_unregister_ldisc(N_GSM0710);
+		printk(KERN_ERR "gsm_init: tty allocation failed.\n");
+		return -EINVAL;
+	}
+	gsm_tty_driver->owner	= THIS_MODULE;
+	gsm_tty_driver->driver_name	= "gsmtty";
+	gsm_tty_driver->name		= "gsmtty";
+	gsm_tty_driver->major		= 0;	/* Dynamic */
+	gsm_tty_driver->minor_start	= 0;
+	gsm_tty_driver->type		= TTY_DRIVER_TYPE_SERIAL;
+	gsm_tty_driver->subtype	= SERIAL_TYPE_NORMAL;
+	gsm_tty_driver->flags	= TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
+							| TTY_DRIVER_HARDWARE_BREAK;
+	gsm_tty_driver->init_termios	= tty_std_termios;
+	/* Fixme */
+	gsm_tty_driver->init_termios.c_lflag &= ~ECHO;
+	tty_set_operations(gsm_tty_driver, &gsmtty_ops);
+
+	spin_lock_init(&gsm_mux_lock);
+
+	if (tty_register_driver(gsm_tty_driver)) {
+		put_tty_driver(gsm_tty_driver);
+		tty_unregister_ldisc(N_GSM0710);
+		printk(KERN_ERR "gsm_init: tty registration failed.\n");
+		return -EBUSY;
+	}
+	printk(KERN_INFO "gsm_init: loaded as %d,%d.\n", gsm_tty_driver->major, gsm_tty_driver->minor_start);
+	return 0;
+}
+
+static void __exit gsm_exit(void)
+{
+	int status = tty_unregister_ldisc(N_GSM0710);
+	if (status != 0)
+		printk(KERN_ERR "n_gsm: can't unregister line discipline (err = %d)\n", status);
+	tty_unregister_driver(gsm_tty_driver);
+	put_tty_driver(gsm_tty_driver);
+	printk(KERN_INFO "gsm_exit: unloaded.\n");
+}
+
+module_init(gsm_init);
+module_exit(gsm_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_GSM0710);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 2fd3d39..8d85587 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -257,6 +257,7 @@
 #define INPUT_POOL_WORDS 128
 #define OUTPUT_POOL_WORDS 32
 #define SEC_XFER_SIZE 512
+#define EXTRACT_SIZE 10
 
 /*
  * The minimum number of bits of entropy before we wake up a read on
@@ -414,7 +415,7 @@
 	unsigned add_ptr;
 	int entropy_count;
 	int input_rotate;
-	__u8 *last_data;
+	__u8 last_data[EXTRACT_SIZE];
 };
 
 static __u32 input_pool_data[INPUT_POOL_WORDS];
@@ -714,8 +715,6 @@
 }
 #endif
 
-#define EXTRACT_SIZE 10
-
 /*********************************************************************
  *
  * Entropy extraction routines
@@ -862,7 +861,7 @@
 	while (nbytes) {
 		extract_buf(r, tmp);
 
-		if (r->last_data) {
+		if (fips_enabled) {
 			spin_lock_irqsave(&r->lock, flags);
 			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
 				panic("Hardware RNG duplicated output!\n");
@@ -951,9 +950,6 @@
 	now = ktime_get_real();
 	mix_pool_bytes(r, &now, sizeof(now));
 	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
-	/* Enable continuous test in fips mode */
-	if (fips_enabled)
-		r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL);
 }
 
 static int rand_initialize(void)
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 78a62eb..ecbe479 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -176,23 +176,6 @@
 static void show_status(int);
 #endif
 
-#ifdef CONFIG_REMOTE_DEBUG
-static void debug_setup(void);
-void queueDebugChar(int c);
-int getDebugChar(void);
-
-#define DEBUG_PORT	1
-#define DEBUG_LEN	256
-
-typedef struct {
-	int in;
-	int out;
-	unsigned char buf[DEBUG_LEN];
-} debugq;
-
-debugq debugiq;
-#endif
-
 /*
  * I have my own version of udelay(), as it is needed when initialising
  * the chip, before the delay loop has been calibrated.  Should probably
@@ -515,11 +498,6 @@
 	/* determine the channel and change to that context */
 	channel = (u_short) (base_addr[CyLICR] >> 2);
 
-#ifdef CONFIG_REMOTE_DEBUG
-	if (channel == DEBUG_PORT) {
-		panic("TxInt on debug port!!!");
-	}
-#endif
 	/* validate the port number (as configured and open) */
 	if ((channel < 0) || (NR_PORTS <= channel)) {
 		base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
@@ -634,14 +612,6 @@
 	info->last_active = jiffies;
 	save_cnt = char_count = base_addr[CyRFOC];
 
-#ifdef CONFIG_REMOTE_DEBUG
-	if (channel == DEBUG_PORT) {
-		while (char_count--) {
-			data = base_addr[CyRDR];
-			queueDebugChar(data);
-		}
-	} else
-#endif
 		/* if there is nowhere to put the data, discard it */
 	if (info->tty == 0) {
 		while (char_count--) {
@@ -2195,9 +2165,7 @@
 		port_num++;
 		info++;
 	}
-#ifdef CONFIG_REMOTE_DEBUG
-	debug_setup();
-#endif
+
 	ret = request_irq(MVME167_IRQ_SER_ERR, cd2401_rxerr_interrupt, 0,
 			  "cd2401_errors", cd2401_rxerr_interrupt);
 	if (ret) {
@@ -2518,193 +2486,4 @@
 
 console_initcall(serial167_console_init);
 
-#ifdef CONFIG_REMOTE_DEBUG
-void putDebugChar(int c)
-{
-	volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
-	unsigned long flags;
-	volatile u_char sink;
-	u_char ier;
-	int port;
-
-	local_irq_save(flags);
-
-	/* Ensure transmitter is enabled! */
-
-	port = DEBUG_PORT;
-	base_addr[CyCAR] = (u_char) port;
-	while (base_addr[CyCCR])
-		;
-	base_addr[CyCCR] = CyENB_XMTR;
-
-	ier = base_addr[CyIER];
-	base_addr[CyIER] = CyTxMpty;
-
-	while (1) {
-		if (pcc2chip[PccSCCTICR] & 0x20) {
-			/* We have a Tx int. Acknowledge it */
-			sink = pcc2chip[PccTPIACKR];
-			if ((base_addr[CyLICR] >> 2) == port) {
-				base_addr[CyTDR] = c;
-				base_addr[CyTEOIR] = 0;
-				break;
-			} else
-				base_addr[CyTEOIR] = CyNOTRANS;
-		}
-	}
-
-	base_addr[CyIER] = ier;
-
-	local_irq_restore(flags);
-}
-
-int getDebugChar()
-{
-	volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
-	unsigned long flags;
-	volatile u_char sink;
-	u_char ier;
-	int port;
-	int i, c;
-
-	i = debugiq.out;
-	if (i != debugiq.in) {
-		c = debugiq.buf[i];
-		if (++i == DEBUG_LEN)
-			i = 0;
-		debugiq.out = i;
-		return c;
-	}
-	/* OK, nothing in queue, wait in poll loop */
-
-	local_irq_save(flags);
-
-	/* Ensure receiver is enabled! */
-
-	port = DEBUG_PORT;
-	base_addr[CyCAR] = (u_char) port;
-#if 0
-	while (base_addr[CyCCR])
-		;
-	base_addr[CyCCR] = CyENB_RCVR;
-#endif
-	ier = base_addr[CyIER];
-	base_addr[CyIER] = CyRxData;
-
-	while (1) {
-		if (pcc2chip[PccSCCRICR] & 0x20) {
-			/* We have a Rx int. Acknowledge it */
-			sink = pcc2chip[PccRPIACKR];
-			if ((base_addr[CyLICR] >> 2) == port) {
-				int cnt = base_addr[CyRFOC];
-				while (cnt-- > 0) {
-					c = base_addr[CyRDR];
-					if (c == 0)
-						printk
-						    ("!! debug char is null (cnt=%d) !!",
-						     cnt);
-					else
-						queueDebugChar(c);
-				}
-				base_addr[CyREOIR] = 0;
-				i = debugiq.out;
-				if (i == debugiq.in)
-					panic("Debug input queue empty!");
-				c = debugiq.buf[i];
-				if (++i == DEBUG_LEN)
-					i = 0;
-				debugiq.out = i;
-				break;
-			} else
-				base_addr[CyREOIR] = CyNOTRANS;
-		}
-	}
-
-	base_addr[CyIER] = ier;
-
-	local_irq_restore(flags);
-
-	return (c);
-}
-
-void queueDebugChar(int c)
-{
-	int i;
-
-	i = debugiq.in;
-	debugiq.buf[i] = c;
-	if (++i == DEBUG_LEN)
-		i = 0;
-	if (i != debugiq.out)
-		debugiq.in = i;
-}
-
-static void debug_setup()
-{
-	unsigned long flags;
-	volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
-	int i, cflag;
-
-	cflag = B19200;
-
-	local_irq_save(flags);
-
-	for (i = 0; i < 4; i++) {
-		base_addr[CyCAR] = i;
-		base_addr[CyLICR] = i << 2;
-	}
-
-	debugiq.in = debugiq.out = 0;
-
-	base_addr[CyCAR] = DEBUG_PORT;
-
-	/* baud rate */
-	i = cflag & CBAUD;
-
-	base_addr[CyIER] = 0;
-
-	base_addr[CyCMR] = CyASYNC;
-	base_addr[CyLICR] = DEBUG_PORT << 2;
-	base_addr[CyLIVR] = 0x5c;
-
-	/* tx and rx baud rate */
-
-	base_addr[CyTCOR] = baud_co[i];
-	base_addr[CyTBPR] = baud_bpr[i];
-	base_addr[CyRCOR] = baud_co[i] >> 5;
-	base_addr[CyRBPR] = baud_bpr[i];
-
-	/* set line characteristics  according configuration */
-
-	base_addr[CySCHR1] = 0;
-	base_addr[CySCHR2] = 0;
-	base_addr[CySCRL] = 0;
-	base_addr[CySCRH] = 0;
-	base_addr[CyCOR1] = Cy_8_BITS | CyPARITY_NONE;
-	base_addr[CyCOR2] = 0;
-	base_addr[CyCOR3] = Cy_1_STOP;
-	base_addr[CyCOR4] = baud_cor4[i];
-	base_addr[CyCOR5] = 0;
-	base_addr[CyCOR6] = 0;
-	base_addr[CyCOR7] = 0;
-
-	write_cy_cmd(base_addr, CyINIT_CHAN);
-	write_cy_cmd(base_addr, CyENB_RCVR);
-
-	base_addr[CyCAR] = DEBUG_PORT;	/* !!! Is this needed? */
-
-	base_addr[CyRTPRL] = 2;
-	base_addr[CyRTPRH] = 0;
-
-	base_addr[CyMSVR1] = CyRTS;
-	base_addr[CyMSVR2] = CyDTR;
-
-	base_addr[CyIER] = CyRxData;
-
-	local_irq_restore(flags);
-
-}				/* debug_setup */
-
-#endif
-
 MODULE_LICENSE("GPL");
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
index 7ee5216..cc1e985 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ -238,7 +238,7 @@
  *	@size: size
  *
  *	Queue a series of bytes to the tty buffering. All the characters
- *	passed are marked as without error. Returns the number added.
+ *	passed are marked with the supplied flag. Returns the number added.
  *
  *	Locking: Called functions may take tty->buf.lock
  */
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index b08403d..fbf94cf 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -170,6 +170,18 @@
 
 	  Currently the driver supports AES in ECB and CBC mode without DMA.
 
+config CRYPTO_DEV_NIAGARA2
+       tristate "Niagara2 Stream Processing Unit driver"
+       select CRYPTO_ALGAPI
+       depends on SPARC64
+       help
+	  Each core of a Niagara2 processor contains a Stream
+	  Processing Unit, which itself contains several cryptographic
+	  sub-units.  One set provides the Modular Arithmetic Unit,
+	  used for SSL offload.  The other set provides the Cipher
+	  Group, which can perform encryption, decryption, hashing,
+	  checksumming, and raw copies.
+
 config CRYPTO_DEV_HIFN_795X
 	tristate "Driver HIFN 795x crypto accelerator chips"
 	select CRYPTO_DES
@@ -222,4 +234,13 @@
 	help
 	  This option allows you to have support for AMCC crypto acceleration.
 
+config CRYPTO_DEV_OMAP_SHAM
+	tristate "Support for OMAP SHA1/MD5 hw accelerator"
+	depends on ARCH_OMAP2 || ARCH_OMAP3
+	select CRYPTO_SHA1
+	select CRYPTO_MD5
+	help
+	  OMAP processors have a SHA1/MD5 hw accelerator. Select this if you
+	  want to use the OMAP module for SHA1/MD5 algorithms.
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 6ffcb3f..6dbbe00 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,8 +1,12 @@
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
+obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
+n2_crypto-objs := n2_core.o n2_asm.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
+obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index c7a5a43..09389dd 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -15,14 +15,14 @@
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
 
-#include <asm/io.h>
-#include <asm/delay.h>
+#include <linux/io.h>
+#include <linux/delay.h>
 
 #include "geode-aes.h"
 
 /* Static structures */
 
-static void __iomem * _iobase;
+static void __iomem *_iobase;
 static spinlock_t lock;
 
 /* Write a 128 bit field (either a writable key or IV) */
@@ -30,7 +30,7 @@
 _writefield(u32 offset, void *value)
 {
 	int i;
-	for(i = 0; i < 4; i++)
+	for (i = 0; i < 4; i++)
 		iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
 }
 
@@ -39,7 +39,7 @@
 _readfield(u32 offset, void *value)
 {
 	int i;
-	for(i = 0; i < 4; i++)
+	for (i = 0; i < 4; i++)
 		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
 }
 
@@ -59,7 +59,7 @@
 	do {
 		status = ioread32(_iobase + AES_INTR_REG);
 		cpu_relax();
-	} while(!(status & AES_INTRA_PENDING) && --counter);
+	} while (!(status & AES_INTRA_PENDING) && --counter);
 
 	/* Clear the event */
 	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
@@ -317,7 +317,7 @@
 	err = blkcipher_walk_virt(desc, &walk);
 	op->iv = walk.iv;
 
-	while((nbytes = walk.nbytes)) {
+	while ((nbytes = walk.nbytes)) {
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_CBC;
@@ -349,7 +349,7 @@
 	err = blkcipher_walk_virt(desc, &walk);
 	op->iv = walk.iv;
 
-	while((nbytes = walk.nbytes)) {
+	while ((nbytes = walk.nbytes)) {
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_CBC;
@@ -429,7 +429,7 @@
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
-	while((nbytes = walk.nbytes)) {
+	while ((nbytes = walk.nbytes)) {
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_ECB;
@@ -459,7 +459,7 @@
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
-	while((nbytes = walk.nbytes)) {
+	while ((nbytes = walk.nbytes)) {
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_ECB;
@@ -518,11 +518,12 @@
 geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	int ret;
-
-	if ((ret = pci_enable_device(dev)))
+	ret = pci_enable_device(dev);
+	if (ret)
 		return ret;
 
-	if ((ret = pci_request_regions(dev, "geode-aes")))
+	ret = pci_request_regions(dev, "geode-aes");
+	if (ret)
 		goto eenable;
 
 	_iobase = pci_iomap(dev, 0, 0);
@@ -537,13 +538,16 @@
 	/* Clear any pending activity */
 	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
 
-	if ((ret = crypto_register_alg(&geode_alg)))
+	ret = crypto_register_alg(&geode_alg);
+	if (ret)
 		goto eiomap;
 
-	if ((ret = crypto_register_alg(&geode_ecb_alg)))
+	ret = crypto_register_alg(&geode_ecb_alg);
+	if (ret)
 		goto ealg;
 
-	if ((ret = crypto_register_alg(&geode_cbc_alg)))
+	ret = crypto_register_alg(&geode_cbc_alg);
+	if (ret)
 		goto eecb;
 
 	printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 73e8b17..16fce3a 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -638,7 +638,7 @@
 
 #define ASYNC_FLAGS_MISALIGNED	(1<<0)
 
-struct ablkcipher_walk
+struct hifn_cipher_walk
 {
 	struct scatterlist	cache[ASYNC_SCATTERLIST_CACHE];
 	u32			flags;
@@ -657,7 +657,7 @@
 	u8			*iv;
 	unsigned int		ivsize;
 	u8			op, type, mode, unused;
-	struct ablkcipher_walk	walk;
+	struct hifn_cipher_walk	walk;
 };
 
 #define crypto_alg_to_hifn(a)	container_of(a, struct hifn_crypto_alg, alg)
@@ -1417,7 +1417,7 @@
 	return 0;
 }
 
-static int ablkcipher_walk_init(struct ablkcipher_walk *w,
+static int hifn_cipher_walk_init(struct hifn_cipher_walk *w,
 		int num, gfp_t gfp_flags)
 {
 	int i;
@@ -1442,7 +1442,7 @@
 	return i;
 }
 
-static void ablkcipher_walk_exit(struct ablkcipher_walk *w)
+static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w)
 {
 	int i;
 
@@ -1486,8 +1486,8 @@
 	return idx;
 }
 
-static int ablkcipher_walk(struct ablkcipher_request *req,
-		struct ablkcipher_walk *w)
+static int hifn_cipher_walk(struct ablkcipher_request *req,
+		struct hifn_cipher_walk *w)
 {
 	struct scatterlist *dst, *t;
 	unsigned int nbytes = req->nbytes, offset, copy, diff;
@@ -1600,12 +1600,12 @@
 	}
 
 	if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
-		err = ablkcipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
+		err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
 		if (err < 0)
 			return err;
 	}
 
-	sg_num = ablkcipher_walk(req, &rctx->walk);
+	sg_num = hifn_cipher_walk(req, &rctx->walk);
 	if (sg_num < 0) {
 		err = sg_num;
 		goto err_out_exit;
@@ -1806,7 +1806,7 @@
 			kunmap_atomic(saddr, KM_SOFTIRQ0);
 		}
 
-		ablkcipher_walk_exit(&rctx->walk);
+		hifn_cipher_walk_exit(&rctx->walk);
 	}
 
 	req->base.complete(&req->base, error);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 6f29012..e095422 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -15,8 +15,14 @@
 #include <linux/platform_device.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
 
 #include "mv_cesa.h"
+
+#define MV_CESA	"MV-CESA:"
+#define MAX_HW_HASH_SIZE	0xFFFF
+
 /*
  * STM:
  *   /---------------------------------------\
@@ -39,10 +45,12 @@
  * @dst_sg_it:		sg iterator for dst
  * @sg_src_left:	bytes left in src to process (scatter list)
  * @src_start:		offset to add to src start position (scatter list)
- * @crypt_len:		length of current crypt process
+ * @crypt_len:		length of current hw crypt/hash process
+ * @hw_nbytes:		total bytes to process in hw for this request
+ * @copy_back:		whether to copy data back (crypt) or not (hash)
  * @sg_dst_left:	bytes left dst to process in this scatter list
  * @dst_start:		offset to add to dst start position (scatter list)
- * @total_req_bytes:	total number of bytes processed (request).
+ * @hw_processed_bytes:	number of bytes processed by hw (request).
  *
  * sg helpers are used to iterate over the scatterlist. Since the size of the
  * SRAM may be less than the scatter size, this struct is used to keep
@@ -51,15 +59,19 @@
 struct req_progress {
 	struct sg_mapping_iter src_sg_it;
 	struct sg_mapping_iter dst_sg_it;
+	void (*complete) (void);
+	void (*process) (int is_first);
 
 	/* src mostly */
 	int sg_src_left;
 	int src_start;
 	int crypt_len;
+	int hw_nbytes;
 	/* dst mostly */
+	int copy_back;
 	int sg_dst_left;
 	int dst_start;
-	int total_req_bytes;
+	int hw_processed_bytes;
 };
 
 struct crypto_priv {
@@ -72,10 +84,12 @@
 	spinlock_t lock;
 	struct crypto_queue queue;
 	enum engine_status eng_st;
-	struct ablkcipher_request *cur_req;
+	struct crypto_async_request *cur_req;
 	struct req_progress p;
 	int max_req_size;
 	int sram_size;
+	int has_sha1;
+	int has_hmac_sha1;
 };
 
 static struct crypto_priv *cpg;
@@ -97,6 +111,31 @@
 	int decrypt;
 };
 
+enum hash_op {
+	COP_SHA1,
+	COP_HMAC_SHA1
+};
+
+struct mv_tfm_hash_ctx {
+	struct crypto_shash *fallback;
+	struct crypto_shash *base_hash;
+	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
+	int count_add;
+	enum hash_op op;
+};
+
+struct mv_req_hash_ctx {
+	u64 count;
+	u32 state[SHA1_DIGEST_SIZE / 4];
+	u8 buffer[SHA1_BLOCK_SIZE];
+	int first_hash;		/* marks that we don't have previous state */
+	int last_chunk;		/* marks that this is the 'final' request */
+	int extra_bytes;	/* unprocessed bytes in buffer */
+	enum hash_op op;
+	int count_add;
+	struct scatterlist dummysg;
+};
+
 static void compute_aes_dec_key(struct mv_ctx *ctx)
 {
 	struct crypto_aes_ctx gen_aes_key;
@@ -144,32 +183,51 @@
 	return 0;
 }
 
-static void setup_data_in(struct ablkcipher_request *req)
+static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 {
 	int ret;
-	void *buf;
+	void *sbuf;
+	int copied = 0;
 
-	if (!cpg->p.sg_src_left) {
-		ret = sg_miter_next(&cpg->p.src_sg_it);
-		BUG_ON(!ret);
-		cpg->p.sg_src_left = cpg->p.src_sg_it.length;
-		cpg->p.src_start = 0;
+	while (1) {
+		if (!p->sg_src_left) {
+			ret = sg_miter_next(&p->src_sg_it);
+			BUG_ON(!ret);
+			p->sg_src_left = p->src_sg_it.length;
+			p->src_start = 0;
+		}
+
+		sbuf = p->src_sg_it.addr + p->src_start;
+
+		if (p->sg_src_left <= len - copied) {
+			memcpy(dbuf + copied, sbuf, p->sg_src_left);
+			copied += p->sg_src_left;
+			p->sg_src_left = 0;
+			if (copied >= len)
+				break;
+		} else {
+			int copy_len = len - copied;
+			memcpy(dbuf + copied, sbuf, copy_len);
+			p->src_start += copy_len;
+			p->sg_src_left -= copy_len;
+			break;
+		}
 	}
+}
 
-	cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
-
-	buf = cpg->p.src_sg_it.addr;
-	buf += cpg->p.src_start;
-
-	memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
-
-	cpg->p.sg_src_left -= cpg->p.crypt_len;
-	cpg->p.src_start += cpg->p.crypt_len;
+static void setup_data_in(void)
+{
+	struct req_progress *p = &cpg->p;
+	int data_in_sram =
+	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
+	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
+			data_in_sram - p->crypt_len);
+	p->crypt_len = data_in_sram;
 }
 
 static void mv_process_current_q(int first_block)
 {
-	struct ablkcipher_request *req = cpg->cur_req;
+	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
 	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
 	struct sec_accel_config op;
@@ -179,6 +237,7 @@
 		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
 		break;
 	case COP_AES_CBC:
+	default:
 		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
 		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
 			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
@@ -211,7 +270,7 @@
 		ENC_P_DST(SRAM_DATA_OUT_START);
 	op.enc_key_p = SRAM_DATA_KEY_P;
 
-	setup_data_in(req);
+	setup_data_in();
 	op.enc_len = cpg->p.crypt_len;
 	memcpy(cpg->sram + SRAM_CONFIG, &op,
 			sizeof(struct sec_accel_config));
@@ -228,91 +287,294 @@
 
 static void mv_crypto_algo_completion(void)
 {
-	struct ablkcipher_request *req = cpg->cur_req;
+	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
 
+	sg_miter_stop(&cpg->p.src_sg_it);
+	sg_miter_stop(&cpg->p.dst_sg_it);
+
 	if (req_ctx->op != COP_AES_CBC)
 		return ;
 
 	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
 }
 
+static void mv_process_hash_current(int first_block)
+{
+	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+	struct req_progress *p = &cpg->p;
+	struct sec_accel_config op = { 0 };
+	int is_last;
+
+	switch (req_ctx->op) {
+	case COP_SHA1:
+	default:
+		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
+		break;
+	case COP_HMAC_SHA1:
+		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
+		break;
+	}
+
+	op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
+		MAC_SRC_TOTAL_LEN((u32)req_ctx->count);
+
+	setup_data_in();
+
+	op.mac_digest =
+		MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
+	op.mac_iv =
+		MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
+		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
+
+	is_last = req_ctx->last_chunk
+		&& (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
+		&& (req_ctx->count <= MAX_HW_HASH_SIZE);
+	if (req_ctx->first_hash) {
+		if (is_last)
+			op.config |= CFG_NOT_FRAG;
+		else
+			op.config |= CFG_FIRST_FRAG;
+
+		req_ctx->first_hash = 0;
+	} else {
+		if (is_last)
+			op.config |= CFG_LAST_FRAG;
+		else
+			op.config |= CFG_MID_FRAG;
+	}
+
+	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
+
+	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+	/* GO */
+	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+	/*
+	 * XXX: add a timer in case the interrupt does not occur for some
+	 * mysterious reason.
+	 */
+}
+
+static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
+					  struct shash_desc *desc)
+{
+	int i;
+	struct sha1_state shash_state;
+
+	shash_state.count = ctx->count + ctx->count_add;
+	for (i = 0; i < 5; i++)
+		shash_state.state[i] = ctx->state[i];
+	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
+	return crypto_shash_import(desc, &shash_state);
+}
+
+static int mv_hash_final_fallback(struct ahash_request *req)
+{
+	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
+	} desc;
+	int rc;
+
+	desc.shash.tfm = tfm_ctx->fallback;
+	desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	if (unlikely(req_ctx->first_hash)) {
+		crypto_shash_init(&desc.shash);
+		crypto_shash_update(&desc.shash, req_ctx->buffer,
+				    req_ctx->extra_bytes);
+	} else {
+		/* only SHA1 for now */
+		rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
+		if (rc)
+			goto out;
+	}
+	rc = crypto_shash_final(&desc.shash, req->result);
+out:
+	return rc;
+}
+
+static void mv_hash_algo_completion(void)
+{
+	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+
+	if (ctx->extra_bytes)
+		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
+	sg_miter_stop(&cpg->p.src_sg_it);
+
+	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
+
+	if (likely(ctx->last_chunk)) {
+		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
+			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
+			       crypto_ahash_digestsize(crypto_ahash_reqtfm
+						       (req)));
+		} else
+			mv_hash_final_fallback(req);
+	}
+}
+
 static void dequeue_complete_req(void)
 {
-	struct ablkcipher_request *req = cpg->cur_req;
+	struct crypto_async_request *req = cpg->cur_req;
 	void *buf;
 	int ret;
+	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
+	if (cpg->p.copy_back) {
+		int need_copy_len = cpg->p.crypt_len;
+		int sram_offset = 0;
+		do {
+			int dst_copy;
 
-	cpg->p.total_req_bytes += cpg->p.crypt_len;
-	do {
-		int dst_copy;
+			if (!cpg->p.sg_dst_left) {
+				ret = sg_miter_next(&cpg->p.dst_sg_it);
+				BUG_ON(!ret);
+				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+				cpg->p.dst_start = 0;
+			}
 
-		if (!cpg->p.sg_dst_left) {
-			ret = sg_miter_next(&cpg->p.dst_sg_it);
-			BUG_ON(!ret);
-			cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
-			cpg->p.dst_start = 0;
-		}
+			buf = cpg->p.dst_sg_it.addr;
+			buf += cpg->p.dst_start;
 
-		buf = cpg->p.dst_sg_it.addr;
-		buf += cpg->p.dst_start;
+			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
 
-		dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+			memcpy(buf,
+			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
+			       dst_copy);
+			sram_offset += dst_copy;
+			cpg->p.sg_dst_left -= dst_copy;
+			need_copy_len -= dst_copy;
+			cpg->p.dst_start += dst_copy;
+		} while (need_copy_len > 0);
+	}
 
-		memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
-
-		cpg->p.sg_dst_left -= dst_copy;
-		cpg->p.crypt_len -= dst_copy;
-		cpg->p.dst_start += dst_copy;
-	} while (cpg->p.crypt_len > 0);
+	cpg->p.crypt_len = 0;
 
 	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
-	if (cpg->p.total_req_bytes < req->nbytes) {
+	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
 		/* process next scatter list entry */
 		cpg->eng_st = ENGINE_BUSY;
-		mv_process_current_q(0);
+		cpg->p.process(0);
 	} else {
-		sg_miter_stop(&cpg->p.src_sg_it);
-		sg_miter_stop(&cpg->p.dst_sg_it);
-		mv_crypto_algo_completion();
+		cpg->p.complete();
 		cpg->eng_st = ENGINE_IDLE;
-		req->base.complete(&req->base, 0);
+		local_bh_disable();
+		req->complete(req, 0);
+		local_bh_enable();
 	}
 }
 
 static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
 {
 	int i = 0;
+	size_t cur_len;
 
-	do {
-		total_bytes -= sl[i].length;
-		i++;
-
-	} while (total_bytes > 0);
+	while (1) {
+		cur_len = sl[i].length;
+		++i;
+		if (total_bytes > cur_len)
+			total_bytes -= cur_len;
+		else
+			break;
+	}
 
 	return i;
 }
 
-static void mv_enqueue_new_req(struct ablkcipher_request *req)
+static void mv_start_new_crypt_req(struct ablkcipher_request *req)
 {
+	struct req_progress *p = &cpg->p;
 	int num_sgs;
 
-	cpg->cur_req = req;
-	memset(&cpg->p, 0, sizeof(struct req_progress));
+	cpg->cur_req = &req->base;
+	memset(p, 0, sizeof(struct req_progress));
+	p->hw_nbytes = req->nbytes;
+	p->complete = mv_crypto_algo_completion;
+	p->process = mv_process_current_q;
+	p->copy_back = 1;
 
 	num_sgs = count_sgs(req->src, req->nbytes);
-	sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
 
 	num_sgs = count_sgs(req->dst, req->nbytes);
-	sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+
 	mv_process_current_q(1);
 }
 
+static void mv_start_new_hash_req(struct ahash_request *req)
+{
+	struct req_progress *p = &cpg->p;
+	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+	int num_sgs, hw_bytes, old_extra_bytes, rc;
+	cpg->cur_req = &req->base;
+	memset(p, 0, sizeof(struct req_progress));
+	hw_bytes = req->nbytes + ctx->extra_bytes;
+	old_extra_bytes = ctx->extra_bytes;
+
+	if (unlikely(ctx->extra_bytes)) {
+		memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
+		       ctx->extra_bytes);
+		p->crypt_len = ctx->extra_bytes;
+	}
+
+	memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
+
+	if (unlikely(!ctx->first_hash)) {
+		writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+		writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+		writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+		writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+		writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+	}
+
+	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
+	if (ctx->extra_bytes != 0
+	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
+		hw_bytes -= ctx->extra_bytes;
+	else
+		ctx->extra_bytes = 0;
+
+	num_sgs = count_sgs(req->src, req->nbytes);
+	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+	if (hw_bytes) {
+		p->hw_nbytes = hw_bytes;
+		p->complete = mv_hash_algo_completion;
+		p->process = mv_process_hash_current;
+
+		mv_process_hash_current(1);
+	} else {
+		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
+				ctx->extra_bytes - old_extra_bytes);
+		sg_miter_stop(&p->src_sg_it);
+		if (ctx->last_chunk)
+			rc = mv_hash_final_fallback(req);
+		else
+			rc = 0;
+		cpg->eng_st = ENGINE_IDLE;
+		local_bh_disable();
+		req->base.complete(&req->base, rc);
+		local_bh_enable();
+	}
+}
+
 static int queue_manag(void *data)
 {
 	cpg->eng_st = ENGINE_IDLE;
 	do {
-		struct ablkcipher_request *req;
 		struct crypto_async_request *async_req = NULL;
 		struct crypto_async_request *backlog;
 
@@ -338,9 +600,18 @@
 		}
 
 		if (async_req) {
-			req = container_of(async_req,
-					struct ablkcipher_request, base);
-			mv_enqueue_new_req(req);
+			if (async_req->tfm->__crt_alg->cra_type !=
+			    &crypto_ahash_type) {
+				struct ablkcipher_request *req =
+				    container_of(async_req,
+						 struct ablkcipher_request,
+						 base);
+				mv_start_new_crypt_req(req);
+			} else {
+				struct ahash_request *req =
+				    ahash_request_cast(async_req);
+				mv_start_new_hash_req(req);
+			}
 			async_req = NULL;
 		}
 
@@ -350,13 +621,13 @@
 	return 0;
 }
 
-static int mv_handle_req(struct ablkcipher_request *req)
+static int mv_handle_req(struct crypto_async_request *req)
 {
 	unsigned long flags;
 	int ret;
 
 	spin_lock_irqsave(&cpg->lock, flags);
-	ret = ablkcipher_enqueue_request(&cpg->queue, req);
+	ret = crypto_enqueue_request(&cpg->queue, req);
 	spin_unlock_irqrestore(&cpg->lock, flags);
 	wake_up_process(cpg->queue_th);
 	return ret;
@@ -369,7 +640,7 @@
 	req_ctx->op = COP_AES_ECB;
 	req_ctx->decrypt = 0;
 
-	return mv_handle_req(req);
+	return mv_handle_req(&req->base);
 }
 
 static int mv_dec_aes_ecb(struct ablkcipher_request *req)
@@ -381,7 +652,7 @@
 	req_ctx->decrypt = 1;
 
 	compute_aes_dec_key(ctx);
-	return mv_handle_req(req);
+	return mv_handle_req(&req->base);
 }
 
 static int mv_enc_aes_cbc(struct ablkcipher_request *req)
@@ -391,7 +662,7 @@
 	req_ctx->op = COP_AES_CBC;
 	req_ctx->decrypt = 0;
 
-	return mv_handle_req(req);
+	return mv_handle_req(&req->base);
 }
 
 static int mv_dec_aes_cbc(struct ablkcipher_request *req)
@@ -403,7 +674,7 @@
 	req_ctx->decrypt = 1;
 
 	compute_aes_dec_key(ctx);
-	return mv_handle_req(req);
+	return mv_handle_req(&req->base);
 }
 
 static int mv_cra_init(struct crypto_tfm *tfm)
@@ -412,6 +683,215 @@
 	return 0;
 }
 
+static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
+				 int is_last, unsigned int req_len,
+				 int count_add)
+{
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->op = op;
+	ctx->count = req_len;
+	ctx->first_hash = 1;
+	ctx->last_chunk = is_last;
+	ctx->count_add = count_add;
+}
+
+static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
+				   unsigned req_len)
+{
+	ctx->last_chunk = is_last;
+	ctx->count += req_len;
+}
+
+static int mv_hash_init(struct ahash_request *req)
+{
+	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
+			     tfm_ctx->count_add);
+	return 0;
+}
+
+static int mv_hash_update(struct ahash_request *req)
+{
+	if (!req->nbytes)
+		return 0;
+
+	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
+	return mv_handle_req(&req->base);
+}
+
+static int mv_hash_final(struct ahash_request *req)
+{
+	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+	/* dummy buffer of 4 bytes */
+	sg_init_one(&ctx->dummysg, ctx->buffer, 4);
+	/* I think I'm allowed to do that... */
+	ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
+	mv_update_hash_req_ctx(ctx, 1, 0);
+	return mv_handle_req(&req->base);
+}
+
+static int mv_hash_finup(struct ahash_request *req)
+{
+	if (!req->nbytes)
+		return mv_hash_final(req);
+
+	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
+	return mv_handle_req(&req->base);
+}
+
+static int mv_hash_digest(struct ahash_request *req)
+{
+	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
+			     req->nbytes, tfm_ctx->count_add);
+	return mv_handle_req(&req->base);
+}
+
+static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
+			     const void *ostate)
+{
+	const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
+	int i;
+	for (i = 0; i < 5; i++) {
+		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
+		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
+	}
+}
+
+static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
+			  unsigned int keylen)
+{
+	int rc;
+	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+	int bs, ds, ss;
+
+	if (!ctx->base_hash)
+		return 0;
+
+	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
+	if (rc)
+		return rc;
+
+	/*
+	 * There is no obvious way to extract the ipad/opad from the fallback
+	 * tfm, so the relevant key-setup code from the hmac module is
+	 * duplicated here.
+	 */
+	bs = crypto_shash_blocksize(ctx->base_hash);
+	ds = crypto_shash_digestsize(ctx->base_hash);
+	ss = crypto_shash_statesize(ctx->base_hash);
+
+	{
+		struct {
+			struct shash_desc shash;
+			char ctx[crypto_shash_descsize(ctx->base_hash)];
+		} desc;
+		unsigned int i;
+		char ipad[ss];
+		char opad[ss];
+
+		desc.shash.tfm = ctx->base_hash;
+		desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
+		    CRYPTO_TFM_REQ_MAY_SLEEP;
+
+		if (keylen > bs) {
+			int err;
+
+			err =
+			    crypto_shash_digest(&desc.shash, key, keylen, ipad);
+			if (err)
+				return err;
+
+			keylen = ds;
+		} else
+			memcpy(ipad, key, keylen);
+
+		memset(ipad + keylen, 0, bs - keylen);
+		memcpy(opad, ipad, bs);
+
+		for (i = 0; i < bs; i++) {
+			ipad[i] ^= 0x36;
+			opad[i] ^= 0x5c;
+		}
+
+		rc = crypto_shash_init(&desc.shash) ? :
+		    crypto_shash_update(&desc.shash, ipad, bs) ? :
+		    crypto_shash_export(&desc.shash, ipad) ? :
+		    crypto_shash_init(&desc.shash) ? :
+		    crypto_shash_update(&desc.shash, opad, bs) ? :
+		    crypto_shash_export(&desc.shash, opad);
+
+		if (rc == 0)
+			mv_hash_init_ivs(ctx, ipad, opad);
+
+		return rc;
+	}
+}
+
+static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
+			    enum hash_op op, int count_add)
+{
+	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_shash *fallback_tfm = NULL;
+	struct crypto_shash *base_hash = NULL;
+	int err = -ENOMEM;
+
+	ctx->op = op;
+	ctx->count_add = count_add;
+
+	/* Allocate a fallback and abort if it failed. */
+	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+					  CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback_tfm)) {
+		printk(KERN_WARNING MV_CESA
+		       "Fallback driver '%s' could not be loaded!\n",
+		       fallback_driver_name);
+		err = PTR_ERR(fallback_tfm);
+		goto out;
+	}
+	ctx->fallback = fallback_tfm;
+
+	if (base_hash_name) {
+		/* Allocate a hash to compute the ipad/opad of hmac. */
+		base_hash = crypto_alloc_shash(base_hash_name, 0,
+					       CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(base_hash)) {
+			printk(KERN_WARNING MV_CESA
+			       "Base driver '%s' could not be loaded!\n",
+			       base_hash_name);
+			err = PTR_ERR(base_hash);
+			goto err_bad_base;
+		}
+	}
+	ctx->base_hash = base_hash;
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct mv_req_hash_ctx) +
+				 crypto_shash_descsize(ctx->fallback));
+	return 0;
+err_bad_base:
+	crypto_free_shash(fallback_tfm);
+out:
+	return err;
+}
+
+static void mv_cra_hash_exit(struct crypto_tfm *tfm)
+{
+	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_shash(ctx->fallback);
+	if (ctx->base_hash)
+		crypto_free_shash(ctx->base_hash);
+}
+
+static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
+{
+	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
+}
+
+static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
+{
+	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
+}
+
 irqreturn_t crypto_int(int irq, void *priv)
 {
 	u32 val;
@@ -474,6 +954,53 @@
 	},
 };
 
+struct ahash_alg mv_sha1_alg = {
+	.init = mv_hash_init,
+	.update = mv_hash_update,
+	.final = mv_hash_final,
+	.finup = mv_hash_finup,
+	.digest = mv_hash_digest,
+	.halg = {
+		 .digestsize = SHA1_DIGEST_SIZE,
+		 .base = {
+			  .cra_name = "sha1",
+			  .cra_driver_name = "mv-sha1",
+			  .cra_priority = 300,
+			  .cra_flags =
+			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			  .cra_blocksize = SHA1_BLOCK_SIZE,
+			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+			  .cra_init = mv_cra_hash_sha1_init,
+			  .cra_exit = mv_cra_hash_exit,
+			  .cra_module = THIS_MODULE,
+			  }
+		 }
+};
+
+struct ahash_alg mv_hmac_sha1_alg = {
+	.init = mv_hash_init,
+	.update = mv_hash_update,
+	.final = mv_hash_final,
+	.finup = mv_hash_finup,
+	.digest = mv_hash_digest,
+	.setkey = mv_hash_setkey,
+	.halg = {
+		 .digestsize = SHA1_DIGEST_SIZE,
+		 .base = {
+			  .cra_name = "hmac(sha1)",
+			  .cra_driver_name = "mv-hmac-sha1",
+			  .cra_priority = 300,
+			  .cra_flags =
+			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			  .cra_blocksize = SHA1_BLOCK_SIZE,
+			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+			  .cra_init = mv_cra_hash_hmac_sha1_init,
+			  .cra_exit = mv_cra_hash_exit,
+			  .cra_module = THIS_MODULE,
+			  }
+		 }
+};
+
 static int mv_probe(struct platform_device *pdev)
 {
 	struct crypto_priv *cp;
@@ -482,7 +1009,7 @@
 	int ret;
 
 	if (cpg) {
-		printk(KERN_ERR "Second crypto dev?\n");
+		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
 		return -EEXIST;
 	}
 
@@ -496,7 +1023,7 @@
 
 	spin_lock_init(&cp->lock);
 	crypto_init_queue(&cp->queue, 50);
-	cp->reg = ioremap(res->start, res->end - res->start + 1);
+	cp->reg = ioremap(res->start, resource_size(res));
 	if (!cp->reg) {
 		ret = -ENOMEM;
 		goto err;
@@ -507,7 +1034,7 @@
 		ret = -ENXIO;
 		goto err_unmap_reg;
 	}
-	cp->sram_size = res->end - res->start + 1;
+	cp->sram_size = resource_size(res);
 	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
 	cp->sram = ioremap(res->start, cp->sram_size);
 	if (!cp->sram) {
@@ -546,6 +1073,21 @@
 	ret = crypto_register_alg(&mv_aes_alg_cbc);
 	if (ret)
 		goto err_unreg_ecb;
+
+	ret = crypto_register_ahash(&mv_sha1_alg);
+	if (ret == 0)
+		cpg->has_sha1 = 1;
+	else
+		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
+
+	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
+	if (ret == 0) {
+		cpg->has_hmac_sha1 = 1;
+	} else {
+		printk(KERN_WARNING MV_CESA
+		       "Could not register hmac-sha1 driver\n");
+	}
+
 	return 0;
 err_unreg_ecb:
 	crypto_unregister_alg(&mv_aes_alg_ecb);
@@ -570,6 +1112,10 @@
 
 	crypto_unregister_alg(&mv_aes_alg_ecb);
 	crypto_unregister_alg(&mv_aes_alg_cbc);
+	if (cp->has_sha1)
+		crypto_unregister_ahash(&mv_sha1_alg);
+	if (cp->has_hmac_sha1)
+		crypto_unregister_ahash(&mv_hmac_sha1_alg);
 	kthread_stop(cp->queue_th);
 	free_irq(cp->irq, cp);
 	memset(cp->sram, 0, cp->sram_size);
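A minimal sketch (not part of the patch) of the fragment-type selection that mv_process_hash_current() above performs for each hardware pass; the CFG_*_FRAG values are the ones added to mv_cesa.h in the next hunk, and pick_frag_cfg() is a hypothetical helper name.

#include <linux/types.h>
#include "mv_cesa.h"	/* CFG_*_FRAG definitions, see the header diff below */

/* Sketch only: mirrors the first_hash/last_chunk decision in
 * mv_process_hash_current(). */
static u32 pick_frag_cfg(int first_hash, int is_last)
{
	if (first_hash)
		return is_last ? CFG_NOT_FRAG : CFG_FIRST_FRAG;

	return is_last ? CFG_LAST_FRAG : CFG_MID_FRAG;
}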
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
index c3e25d3..08fcb11 100644
--- a/drivers/crypto/mv_cesa.h
+++ b/drivers/crypto/mv_cesa.h
@@ -1,6 +1,10 @@
 #ifndef __MV_CRYPTO_H__
 
 #define DIGEST_INITIAL_VAL_A	0xdd00
+#define DIGEST_INITIAL_VAL_B	0xdd04
+#define DIGEST_INITIAL_VAL_C	0xdd08
+#define DIGEST_INITIAL_VAL_D	0xdd0c
+#define DIGEST_INITIAL_VAL_E	0xdd10
 #define DES_CMD_REG		0xdd58
 
 #define SEC_ACCEL_CMD		0xde00
@@ -70,6 +74,10 @@
 #define CFG_AES_LEN_128		(0 << 24)
 #define CFG_AES_LEN_192		(1 << 24)
 #define CFG_AES_LEN_256		(2 << 24)
+#define CFG_NOT_FRAG		(0 << 30)
+#define CFG_FIRST_FRAG		(1 << 30)
+#define CFG_LAST_FRAG		(2 << 30)
+#define CFG_MID_FRAG		(3 << 30)
 
 	u32 enc_p;
 #define ENC_P_SRC(x)		(x)
@@ -90,7 +98,11 @@
 #define MAC_SRC_TOTAL_LEN(x)	((x) << 16)
 
 	u32 mac_digest;
+#define MAC_DIGEST_P(x)	(x)
+#define MAC_FRAG_LEN(x)	((x) << 16)
 	u32 mac_iv;
+#define MAC_INNER_IV_P(x)	(x)
+#define MAC_OUTER_IV_P(x)	((x) << 16)
 }__attribute__ ((packed));
 	/*
 	 * /-----------\ 0
@@ -101,19 +113,37 @@
 	 * |  IV   IN  |	4 * 4
 	 * |-----------| 0x40 (inplace)
 	 * |  IV BUF   |	4 * 4
-	 * |-----------| 0x50
+	 * |-----------| 0x80
 	 * |  DATA IN  |	16 * x (max ->max_req_size)
-	 * |-----------| 0x50 (inplace operation)
+	 * |-----------| 0x80 (inplace operation)
 	 * |  DATA OUT |	16 * x (max ->max_req_size)
 	 * \-----------/ SRAM size
 	 */
+
+	/* Hashing memory map:
+	 * /-----------\ 0
+	 * | ACCEL CFG |        4 * 8
+	 * |-----------| 0x20
+	 * | Inner IV  |        5 * 4
+	 * |-----------| 0x34
+	 * | Outer IV  |        5 * 4
+	 * |-----------| 0x48
+	 * | Output BUF|        5 * 4
+	 * |-----------| 0x80
+	 * |  DATA IN  |        64 * x (max ->max_req_size)
+	 * \-----------/ SRAM size
+	 */
 #define SRAM_CONFIG		0x00
 #define SRAM_DATA_KEY_P		0x20
 #define SRAM_DATA_IV		0x40
 #define SRAM_DATA_IV_BUF	0x40
-#define SRAM_DATA_IN_START	0x50
-#define SRAM_DATA_OUT_START	0x50
+#define SRAM_DATA_IN_START	0x80
+#define SRAM_DATA_OUT_START	0x80
 
-#define SRAM_CFG_SPACE		0x50
+#define SRAM_HMAC_IV_IN		0x20
+#define SRAM_HMAC_IV_OUT	0x34
+#define SRAM_DIGEST_BUF		0x48
+
+#define SRAM_CFG_SPACE		0x80
 
 #endif
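The new hashing memory map can be cross-checked against the offsets above: the inner IV, outer IV and digest buffer are each five 32-bit words of SHA-1 state, and data starts at 0x80. A minimal sketch (not part of the patch) using the usual BUILD_BUG_ON() idiom; mv_cesa_check_hash_layout() is a hypothetical name.

#include <linux/bug.h>
#include "mv_cesa.h"

static inline void mv_cesa_check_hash_layout(void)
{
	BUILD_BUG_ON(SRAM_HMAC_IV_OUT != SRAM_HMAC_IV_IN + 5 * 4);	/* 0x20 + 0x14 = 0x34 */
	BUILD_BUG_ON(SRAM_DIGEST_BUF != SRAM_HMAC_IV_OUT + 5 * 4);	/* 0x34 + 0x14 = 0x48 */
	BUILD_BUG_ON(SRAM_DATA_IN_START < SRAM_DIGEST_BUF + 5 * 4);	/* digest fits below 0x80 */
}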
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S
new file mode 100644
index 0000000..f7c7937
--- /dev/null
+++ b/drivers/crypto/n2_asm.S
@@ -0,0 +1,95 @@
+/* n2_asm.S: Hypervisor calls for NCS support.
+ *
+ * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/linkage.h>
+#include <asm/hypervisor.h>
+#include "n2_core.h"
+
+	/* o0: queue type
+	 * o1: RA of queue
+	 * o2: num entries in queue
+	 * o3: address of queue handle return
+	 */
+ENTRY(sun4v_ncs_qconf)
+	mov	HV_FAST_NCS_QCONF, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%o3]
+	retl
+	 nop
+ENDPROC(sun4v_ncs_qconf)
+
+	/* %o0: queue handle
+	 * %o1: address of queue type return
+	 * %o2: address of queue base address return
+	 * %o3: address of queue num entries return
+	 */
+ENTRY(sun4v_ncs_qinfo)
+	mov	%o1, %g1
+	mov	%o2, %g2
+	mov	%o3, %g3
+	mov	HV_FAST_NCS_QINFO, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%g1]
+	stx	%o2, [%g2]
+	stx	%o3, [%g3]
+	retl
+	 nop
+ENDPROC(sun4v_ncs_qinfo)
+
+	/* %o0: queue handle
+	 * %o1: address of head offset return
+	 */
+ENTRY(sun4v_ncs_gethead)
+	mov	%o1, %o2
+	mov	HV_FAST_NCS_GETHEAD, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%o2]
+	retl
+	 nop
+ENDPROC(sun4v_ncs_gethead)
+
+	/* %o0: queue handle
+	 * %o1: address of tail offset return
+	 */
+ENTRY(sun4v_ncs_gettail)
+	mov	%o1, %o2
+	mov	HV_FAST_NCS_GETTAIL, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%o2]
+	retl
+	 nop
+ENDPROC(sun4v_ncs_gettail)
+
+	/* %o0: queue handle
+	 * %o1: new tail offset
+	 */
+ENTRY(sun4v_ncs_settail)
+	mov	HV_FAST_NCS_SETTAIL, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+ENDPROC(sun4v_ncs_settail)
+
+	/* %o0: queue handle
+	 * %o1: address of devino return
+	 */
+ENTRY(sun4v_ncs_qhandle_to_devino)
+	mov	%o1, %o2
+	mov	HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%o2]
+	retl
+	 nop
+ENDPROC(sun4v_ncs_qhandle_to_devino)
+
+	/* %o0: queue handle
+	 * %o1: new head offset
+	 */
+ENTRY(sun4v_ncs_sethead_marker)
+	mov	HV_FAST_NCS_SETHEAD_MARKER, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+ENDPROC(sun4v_ncs_sethead_marker)
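The C prototypes these wrappers pair with can be read off the register comments; the declarations below are inferred from those comments and from the calls in n2_core.c (the authoritative versions live in n2_core.h, which this diff does not show), so treat them as a sketch rather than a quotation.

/* Inferred from the %o0..%o3 comments above. */
extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
				     unsigned long queue_ra,
				     unsigned long num_entries,
				     unsigned long *qhandle);
extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
				       unsigned long *head);
extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
				       unsigned long tail);
extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
					      unsigned long head);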
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
new file mode 100644
index 0000000..8566be8
--- /dev/null
+++ b/drivers/crypto/n2_core.c
@@ -0,0 +1,2083 @@
+/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
+ *
+ * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/cpumask.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+
+#include <asm/hypervisor.h>
+#include <asm/mdesc.h>
+
+#include "n2_core.h"
+
+#define DRV_MODULE_NAME		"n2_crypto"
+#define DRV_MODULE_VERSION	"0.1"
+#define DRV_MODULE_RELDATE	"April 29, 2010"
+
+static char version[] __devinitdata =
+	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Niagara2 Crypto driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define N2_CRA_PRIORITY		300
+
+static DEFINE_MUTEX(spu_lock);
+
+struct spu_queue {
+	cpumask_t		sharing;
+	unsigned long		qhandle;
+
+	spinlock_t		lock;
+	u8			q_type;
+	void			*q;
+	unsigned long		head;
+	unsigned long		tail;
+	struct list_head	jobs;
+
+	unsigned long		devino;
+
+	char			irq_name[32];
+	unsigned int		irq;
+
+	struct list_head	list;
+};
+
+static struct spu_queue **cpu_to_cwq;
+static struct spu_queue **cpu_to_mau;
+
+static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
+{
+	if (q->q_type == HV_NCS_QTYPE_MAU) {
+		off += MAU_ENTRY_SIZE;
+		if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
+			off = 0;
+	} else {
+		off += CWQ_ENTRY_SIZE;
+		if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
+			off = 0;
+	}
+	return off;
+}
+
+struct n2_request_common {
+	struct list_head	entry;
+	unsigned int		offset;
+};
+#define OFFSET_NOT_RUNNING	(~(unsigned int)0)
+
+/* An async job request records the final tail value it used in
+ * n2_request_common->offset; test whether that offset falls within the
+ * range (old_head, new_head], i.e. among the entries completed since the
+ * last head update.
+ */
+static inline bool job_finished(struct spu_queue *q, unsigned int offset,
+				unsigned long old_head, unsigned long new_head)
+{
+	if (old_head <= new_head) {
+		if (offset > old_head && offset <= new_head)
+			return true;
+	} else {
+		if (offset > old_head || offset <= new_head)
+			return true;
+	}
+	return false;
+}
+
+/* When the HEAD marker is unequal to the actual HEAD, we get
+ * a virtual device INO interrupt.  We should process the
+ * completed CWQ entries and adjust the HEAD marker to clear
+ * the IRQ.
+ */
+static irqreturn_t cwq_intr(int irq, void *dev_id)
+{
+	unsigned long off, new_head, hv_ret;
+	struct spu_queue *q = dev_id;
+
+	pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
+	       smp_processor_id(), q->qhandle);
+
+	spin_lock(&q->lock);
+
+	hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
+
+	pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
+	       smp_processor_id(), new_head, hv_ret);
+
+	for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
+		/* XXX ... XXX */
+	}
+
+	hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
+	if (hv_ret == HV_EOK)
+		q->head = new_head;
+
+	spin_unlock(&q->lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mau_intr(int irq, void *dev_id)
+{
+	struct spu_queue *q = dev_id;
+	unsigned long head, hv_ret;
+
+	spin_lock(&q->lock);
+
+	pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
+	       smp_processor_id(), q->qhandle);
+
+	hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
+
+	pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
+	       smp_processor_id(), head, hv_ret);
+
+	sun4v_ncs_sethead_marker(q->qhandle, head);
+
+	spin_unlock(&q->lock);
+
+	return IRQ_HANDLED;
+}
+
+static void *spu_queue_next(struct spu_queue *q, void *cur)
+{
+	return q->q + spu_next_offset(q, cur - q->q);
+}
+
+static int spu_queue_num_free(struct spu_queue *q)
+{
+	unsigned long head = q->head;
+	unsigned long tail = q->tail;
+	unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
+	unsigned long diff;
+
+	if (head > tail)
+		diff = head - tail;
+	else
+		diff = (end - tail) + head;
+
+	return (diff / CWQ_ENTRY_SIZE) - 1;
+}
+
+static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
+{
+	int avail = spu_queue_num_free(q);
+
+	if (avail >= num_entries)
+		return q->q + q->tail;
+
+	return NULL;
+}
+
+static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
+{
+	unsigned long hv_ret, new_tail;
+
+	new_tail = spu_next_offset(q, last - q->q);
+
+	hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
+	if (hv_ret == HV_EOK)
+		q->tail = new_tail;
+	return hv_ret;
+}
+
+static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
+			     int enc_type, int auth_type,
+			     unsigned int hash_len,
+			     bool sfas, bool sob, bool eob, bool encrypt,
+			     int opcode)
+{
+	u64 word = (len - 1) & CONTROL_LEN;
+
+	word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
+	word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
+	word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
+	if (sfas)
+		word |= CONTROL_STORE_FINAL_AUTH_STATE;
+	if (sob)
+		word |= CONTROL_START_OF_BLOCK;
+	if (eob)
+		word |= CONTROL_END_OF_BLOCK;
+	if (encrypt)
+		word |= CONTROL_ENCRYPT;
+	if (hmac_key_len)
+		word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
+	if (hash_len)
+		word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
+
+	return word;
+}
+
+#if 0
+static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
+{
+	if (this_len >= 64 ||
+	    qp->head != qp->tail)
+		return true;
+	return false;
+}
+#endif
+
+struct n2_base_ctx {
+	struct list_head		list;
+};
+
+static void n2_base_ctx_init(struct n2_base_ctx *ctx)
+{
+	INIT_LIST_HEAD(&ctx->list);
+}
+
+struct n2_hash_ctx {
+	struct n2_base_ctx		base;
+
+	struct crypto_ahash		*fallback;
+
+	/* These next three members must match the layout created by
+	 * crypto_init_shash_ops_async.  This allows us to properly
+	 * plumb requests we can't do in hardware down to the fallback
+	 * operation, providing all of the data structures and layouts
+	 * expected by those paths.
+	 */
+	struct ahash_request		fallback_req;
+	struct shash_desc		fallback_desc;
+	union {
+		struct md5_state	md5;
+		struct sha1_state	sha1;
+		struct sha256_state	sha256;
+	} u;
+
+	unsigned char			hash_key[64];
+	unsigned char			keyed_zero_hash[32];
+};
+
+static int n2_hash_async_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_ahash_init(&ctx->fallback_req);
+}
+
+static int n2_hash_async_update(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	ctx->fallback_req.nbytes = req->nbytes;
+	ctx->fallback_req.src = req->src;
+
+	return crypto_ahash_update(&ctx->fallback_req);
+}
+
+static int n2_hash_async_final(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	ctx->fallback_req.result = req->result;
+
+	return crypto_ahash_final(&ctx->fallback_req);
+}
+
+static int n2_hash_async_finup(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	ctx->fallback_req.nbytes = req->nbytes;
+	ctx->fallback_req.src = req->src;
+	ctx->fallback_req.result = req->result;
+
+	return crypto_ahash_finup(&ctx->fallback_req);
+}
+
+static int n2_hash_cra_init(struct crypto_tfm *tfm)
+{
+	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct crypto_ahash *fallback_tfm;
+	int err;
+
+	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+					  CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback_tfm)) {
+		pr_warning("Fallback driver '%s' could not be loaded!\n",
+			   fallback_driver_name);
+		err = PTR_ERR(fallback_tfm);
+		goto out;
+	}
+
+	ctx->fallback = fallback_tfm;
+	return 0;
+
+out:
+	return err;
+}
+
+static void n2_hash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+
+	crypto_free_ahash(ctx->fallback);
+}
+
+static unsigned long wait_for_tail(struct spu_queue *qp)
+{
+	unsigned long head, hv_ret;
+
+	do {
+		hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
+		if (hv_ret != HV_EOK) {
+			pr_err("Hypervisor error on gethead\n");
+			break;
+		}
+		if (head == qp->tail) {
+			qp->head = head;
+			break;
+		}
+	} while (1);
+	return hv_ret;
+}
+
+static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
+					      struct cwq_initial_entry *ent)
+{
+	unsigned long hv_ret = spu_queue_submit(qp, ent);
+
+	if (hv_ret == HV_EOK)
+		hv_ret = wait_for_tail(qp);
+
+	return hv_ret;
+}
+
+static int n2_hash_async_digest(struct ahash_request *req,
+				unsigned int auth_type, unsigned int digest_size,
+				unsigned int result_size, void *hash_loc)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct cwq_initial_entry *ent;
+	struct crypto_hash_walk walk;
+	struct spu_queue *qp;
+	unsigned long flags;
+	int err = -ENODEV;
+	int nbytes, cpu;
+
+	/* The total effective length of the operation may not
+	 * exceed 2^16.
+	 */
+	if (unlikely(req->nbytes > (1 << 16))) {
+		ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+		ctx->fallback_req.base.flags =
+			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+		ctx->fallback_req.nbytes = req->nbytes;
+		ctx->fallback_req.src = req->src;
+		ctx->fallback_req.result = req->result;
+
+		return crypto_ahash_digest(&ctx->fallback_req);
+	}
+
+	n2_base_ctx_init(&ctx->base);
+
+	nbytes = crypto_hash_walk_first(req, &walk);
+
+	cpu = get_cpu();
+	qp = cpu_to_cwq[cpu];
+	if (!qp)
+		goto out;
+
+	spin_lock_irqsave(&qp->lock, flags);
+
+	/* XXX can do better, improve this later by doing a by-hand scatterlist
+	 * XXX walk, etc.
+	 */
+	ent = qp->q + qp->tail;
+
+	ent->control = control_word_base(nbytes, 0, 0,
+					 auth_type, digest_size,
+					 false, true, false, false,
+					 OPCODE_INPLACE_BIT |
+					 OPCODE_AUTH_MAC);
+	ent->src_addr = __pa(walk.data);
+	ent->auth_key_addr = 0UL;
+	ent->auth_iv_addr = __pa(hash_loc);
+	ent->final_auth_state_addr = 0UL;
+	ent->enc_key_addr = 0UL;
+	ent->enc_iv_addr = 0UL;
+	ent->dest_addr = __pa(hash_loc);
+
+	nbytes = crypto_hash_walk_done(&walk, 0);
+	while (nbytes > 0) {
+		ent = spu_queue_next(qp, ent);
+
+		ent->control = (nbytes - 1);
+		ent->src_addr = __pa(walk.data);
+		ent->auth_key_addr = 0UL;
+		ent->auth_iv_addr = 0UL;
+		ent->final_auth_state_addr = 0UL;
+		ent->enc_key_addr = 0UL;
+		ent->enc_iv_addr = 0UL;
+		ent->dest_addr = 0UL;
+
+		nbytes = crypto_hash_walk_done(&walk, 0);
+	}
+	ent->control |= CONTROL_END_OF_BLOCK;
+
+	if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
+		err = -EINVAL;
+	else
+		err = 0;
+
+	spin_unlock_irqrestore(&qp->lock, flags);
+
+	if (!err)
+		memcpy(req->result, hash_loc, result_size);
+out:
+	put_cpu();
+
+	return err;
+}
+
+static int n2_md5_async_digest(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct md5_state *m = &ctx->u.md5;
+
+	if (unlikely(req->nbytes == 0)) {
+		static const char md5_zero[MD5_DIGEST_SIZE] = {
+			0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+			0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+		};
+
+		memcpy(req->result, md5_zero, MD5_DIGEST_SIZE);
+		return 0;
+	}
+	m->hash[0] = cpu_to_le32(0x67452301);
+	m->hash[1] = cpu_to_le32(0xefcdab89);
+	m->hash[2] = cpu_to_le32(0x98badcfe);
+	m->hash[3] = cpu_to_le32(0x10325476);
+
+	return n2_hash_async_digest(req, AUTH_TYPE_MD5,
+				    MD5_DIGEST_SIZE, MD5_DIGEST_SIZE,
+				    m->hash);
+}
+
+static int n2_sha1_async_digest(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct sha1_state *s = &ctx->u.sha1;
+
+	if (unlikely(req->nbytes == 0)) {
+		static const char sha1_zero[SHA1_DIGEST_SIZE] = {
+			0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
+			0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
+			0x07, 0x09
+		};
+
+		memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
+		return 0;
+	}
+	s->state[0] = SHA1_H0;
+	s->state[1] = SHA1_H1;
+	s->state[2] = SHA1_H2;
+	s->state[3] = SHA1_H3;
+	s->state[4] = SHA1_H4;
+
+	return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
+				    SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
+				    s->state);
+}
+
+static int n2_sha256_async_digest(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct sha256_state *s = &ctx->u.sha256;
+
+	if (req->nbytes == 0) {
+		static const char sha256_zero[SHA256_DIGEST_SIZE] = {
+			0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
+			0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
+			0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
+			0x1b, 0x78, 0x52, 0xb8, 0x55
+		};
+
+		memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
+		return 0;
+	}
+	s->state[0] = SHA256_H0;
+	s->state[1] = SHA256_H1;
+	s->state[2] = SHA256_H2;
+	s->state[3] = SHA256_H3;
+	s->state[4] = SHA256_H4;
+	s->state[5] = SHA256_H5;
+	s->state[6] = SHA256_H6;
+	s->state[7] = SHA256_H7;
+
+	return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
+				    SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
+				    s->state);
+}
+
+static int n2_sha224_async_digest(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct sha256_state *s = &ctx->u.sha256;
+
+	if (req->nbytes == 0) {
+		static const char sha224_zero[SHA224_DIGEST_SIZE] = {
+			0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+			0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+			0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+			0x2f
+		};
+
+		memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
+		return 0;
+	}
+	s->state[0] = SHA224_H0;
+	s->state[1] = SHA224_H1;
+	s->state[2] = SHA224_H2;
+	s->state[3] = SHA224_H3;
+	s->state[4] = SHA224_H4;
+	s->state[5] = SHA224_H5;
+	s->state[6] = SHA224_H6;
+	s->state[7] = SHA224_H7;
+
+	return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
+				    SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
+				    s->state);
+}
+
+struct n2_cipher_context {
+	int			key_len;
+	int			enc_type;
+	union {
+		u8		aes[AES_MAX_KEY_SIZE];
+		u8		des[DES_KEY_SIZE];
+		u8		des3[3 * DES_KEY_SIZE];
+		u8		arc4[258]; /* S-box, X, Y */
+	} key;
+};
+
+#define N2_CHUNK_ARR_LEN	16
+
+struct n2_crypto_chunk {
+	struct list_head	entry;
+	unsigned long		iv_paddr : 44;
+	unsigned long		arr_len : 20;
+	unsigned long		dest_paddr;
+	unsigned long		dest_final;
+	struct {
+		unsigned long	src_paddr : 44;
+		unsigned long	src_len : 20;
+	} arr[N2_CHUNK_ARR_LEN];
+};
+
+struct n2_request_context {
+	struct ablkcipher_walk	walk;
+	struct list_head	chunk_list;
+	struct n2_crypto_chunk	chunk;
+	u8			temp_iv[16];
+};
+
+/* The SPU allows some level of flexibility for partial cipher blocks
+ * being specified in a descriptor.
+ *
+ * It merely requires that every descriptor's length field is at least
+ * as large as the cipher block size.  This means that a cipher block
+ * can span at most 2 descriptors.  However, this does not allow a
+ * partial block to span into the final descriptor as that would
+ * violate the rule (since every descriptor's length must be at least
+ * the block size).  So, for example, assuming an 8 byte block size:
+ *
+ *	0xe --> 0xa --> 0x8
+ *
+ * is a valid length sequence, whereas:
+ *
+ *	0xe --> 0xb --> 0x7
+ *
+ * is not a valid sequence.
+ */
+
+struct n2_cipher_alg {
+	struct list_head	entry;
+	u8			enc_type;
+	struct crypto_alg	alg;
+};
+
+static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+
+	return container_of(alg, struct n2_cipher_alg, alg);
+}
+
+struct n2_cipher_request_context {
+	struct ablkcipher_walk	walk;
+};
+
+static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			 unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+
+	ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+		ctx->enc_type |= ENC_TYPE_ALG_AES128;
+		break;
+	case AES_KEYSIZE_192:
+		ctx->enc_type |= ENC_TYPE_ALG_AES192;
+		break;
+	case AES_KEYSIZE_256:
+		ctx->enc_type |= ENC_TYPE_ALG_AES256;
+		break;
+	default:
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ctx->key_len = keylen;
+	memcpy(ctx->key.aes, key, keylen);
+	return 0;
+}
+
+static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			 unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+	u32 tmp[DES_EXPKEY_WORDS];
+	int err;
+
+	ctx->enc_type = n2alg->enc_type;
+
+	if (keylen != DES_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	err = des_ekey(tmp, key);
+	if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	ctx->key_len = keylen;
+	memcpy(ctx->key.des, key, keylen);
+	return 0;
+}
+
+static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			  unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+
+	ctx->enc_type = n2alg->enc_type;
+
+	if (keylen != (3 * DES_KEY_SIZE)) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->key_len = keylen;
+	memcpy(ctx->key.des3, key, keylen);
+	return 0;
+}
+
+static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			  unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+	u8 *s = ctx->key.arc4;
+	u8 *x = s + 256;
+	u8 *y = x + 1;
+	int i, j, k;
+
+	ctx->enc_type = n2alg->enc_type;
+
+	j = k = 0;
+	*x = 0;
+	*y = 0;
+	for (i = 0; i < 256; i++)
+		s[i] = i;
+	for (i = 0; i < 256; i++) {
+		u8 a = s[i];
+		j = (j + key[k] + a) & 0xff;
+		s[i] = s[j];
+		s[j] = a;
+		if (++k >= keylen)
+			k = 0;
+	}
+
+	return 0;
+}
+
+static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
+{
+	int this_len = nbytes;
+
+	this_len -= (nbytes & (block_size - 1));
+	return this_len > (1 << 16) ? (1 << 16) : this_len;
+}
+
+static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
+			    struct spu_queue *qp, bool encrypt)
+{
+	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+	struct cwq_initial_entry *ent;
+	bool in_place;
+	int i;
+
+	ent = spu_queue_alloc(qp, cp->arr_len);
+	if (!ent) {
+		pr_info("queue_alloc() of %d fails\n",
+			cp->arr_len);
+		return -EBUSY;
+	}
+
+	in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
+
+	ent->control = control_word_base(cp->arr[0].src_len,
+					 0, ctx->enc_type, 0, 0,
+					 false, true, false, encrypt,
+					 OPCODE_ENCRYPT |
+					 (in_place ? OPCODE_INPLACE_BIT : 0));
+	ent->src_addr = cp->arr[0].src_paddr;
+	ent->auth_key_addr = 0UL;
+	ent->auth_iv_addr = 0UL;
+	ent->final_auth_state_addr = 0UL;
+	ent->enc_key_addr = __pa(&ctx->key);
+	ent->enc_iv_addr = cp->iv_paddr;
+	ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
+
+	for (i = 1; i < cp->arr_len; i++) {
+		ent = spu_queue_next(qp, ent);
+
+		ent->control = cp->arr[i].src_len - 1;
+		ent->src_addr = cp->arr[i].src_paddr;
+		ent->auth_key_addr = 0UL;
+		ent->auth_iv_addr = 0UL;
+		ent->final_auth_state_addr = 0UL;
+		ent->enc_key_addr = 0UL;
+		ent->enc_iv_addr = 0UL;
+		ent->dest_addr = 0UL;
+	}
+	ent->control |= CONTROL_END_OF_BLOCK;
+
+	return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
+}
+
+static int n2_compute_chunks(struct ablkcipher_request *req)
+{
+	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+	struct ablkcipher_walk *walk = &rctx->walk;
+	struct n2_crypto_chunk *chunk;
+	unsigned long dest_prev;
+	unsigned int tot_len;
+	bool prev_in_place;
+	int err, nbytes;
+
+	ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
+	err = ablkcipher_walk_phys(req, walk);
+	if (err)
+		return err;
+
+	INIT_LIST_HEAD(&rctx->chunk_list);
+
+	chunk = &rctx->chunk;
+	INIT_LIST_HEAD(&chunk->entry);
+
+	chunk->iv_paddr = 0UL;
+	chunk->arr_len = 0;
+	chunk->dest_paddr = 0UL;
+
+	prev_in_place = false;
+	dest_prev = ~0UL;
+	tot_len = 0;
+
+	while ((nbytes = walk->nbytes) != 0) {
+		unsigned long dest_paddr, src_paddr;
+		bool in_place;
+		int this_len;
+
+		src_paddr = (page_to_phys(walk->src.page) +
+			     walk->src.offset);
+		dest_paddr = (page_to_phys(walk->dst.page) +
+			      walk->dst.offset);
+		in_place = (src_paddr == dest_paddr);
+		this_len = cipher_descriptor_len(nbytes, walk->blocksize);
+
+		if (chunk->arr_len != 0) {
+			if (in_place != prev_in_place ||
+			    (!prev_in_place &&
+			     dest_paddr != dest_prev) ||
+			    chunk->arr_len == N2_CHUNK_ARR_LEN ||
+			    tot_len + this_len > (1 << 16)) {
+				chunk->dest_final = dest_prev;
+				list_add_tail(&chunk->entry,
+					      &rctx->chunk_list);
+				chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
+				if (!chunk) {
+					err = -ENOMEM;
+					break;
+				}
+				INIT_LIST_HEAD(&chunk->entry);
+			}
+		}
+		if (chunk->arr_len == 0) {
+			chunk->dest_paddr = dest_paddr;
+			tot_len = 0;
+		}
+		chunk->arr[chunk->arr_len].src_paddr = src_paddr;
+		chunk->arr[chunk->arr_len].src_len = this_len;
+		chunk->arr_len++;
+
+		dest_prev = dest_paddr + this_len;
+		prev_in_place = in_place;
+		tot_len += this_len;
+
+		err = ablkcipher_walk_done(req, walk, nbytes - this_len);
+		if (err)
+			break;
+	}
+	if (!err && chunk->arr_len != 0) {
+		chunk->dest_final = dest_prev;
+		list_add_tail(&chunk->entry, &rctx->chunk_list);
+	}
+
+	return err;
+}
+
+static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
+{
+	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+	struct n2_crypto_chunk *c, *tmp;
+
+	if (final_iv)
+		memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
+
+	ablkcipher_walk_complete(&rctx->walk);
+	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
+		list_del(&c->entry);
+		if (unlikely(c != &rctx->chunk))
+			kfree(c);
+	}
+
+}
+
+static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
+{
+	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+	struct crypto_tfm *tfm = req->base.tfm;
+	int err = n2_compute_chunks(req);
+	struct n2_crypto_chunk *c, *tmp;
+	unsigned long flags, hv_ret;
+	struct spu_queue *qp;
+
+	if (err)
+		return err;
+
+	qp = cpu_to_cwq[get_cpu()];
+	err = -ENODEV;
+	if (!qp)
+		goto out;
+
+	spin_lock_irqsave(&qp->lock, flags);
+
+	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
+		err = __n2_crypt_chunk(tfm, c, qp, encrypt);
+		if (err)
+			break;
+		list_del(&c->entry);
+		if (unlikely(c != &rctx->chunk))
+			kfree(c);
+	}
+	if (!err) {
+		hv_ret = wait_for_tail(qp);
+		if (hv_ret != HV_EOK)
+			err = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&qp->lock, flags);
+
+	put_cpu();
+
+out:
+	n2_chunk_complete(req, NULL);
+	return err;
+}
+
+static int n2_encrypt_ecb(struct ablkcipher_request *req)
+{
+	return n2_do_ecb(req, true);
+}
+
+static int n2_decrypt_ecb(struct ablkcipher_request *req)
+{
+	return n2_do_ecb(req, false);
+}
+
+static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
+{
+	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+	struct crypto_tfm *tfm = req->base.tfm;
+	unsigned long flags, hv_ret, iv_paddr;
+	int err = n2_compute_chunks(req);
+	struct n2_crypto_chunk *c, *tmp;
+	struct spu_queue *qp;
+	void *final_iv_addr;
+
+	final_iv_addr = NULL;
+
+	if (err)
+		return err;
+
+	qp = cpu_to_cwq[get_cpu()];
+	err = -ENODEV;
+	if (!qp)
+		goto out;
+
+	spin_lock_irqsave(&qp->lock, flags);
+
+	if (encrypt) {
+		iv_paddr = __pa(rctx->walk.iv);
+		list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
+					 entry) {
+			c->iv_paddr = iv_paddr;
+			err = __n2_crypt_chunk(tfm, c, qp, true);
+			if (err)
+				break;
+			iv_paddr = c->dest_final - rctx->walk.blocksize;
+			list_del(&c->entry);
+			if (unlikely(c != &rctx->chunk))
+				kfree(c);
+		}
+		final_iv_addr = __va(iv_paddr);
+	} else {
+		list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
+						 entry) {
+			if (c == &rctx->chunk) {
+				iv_paddr = __pa(rctx->walk.iv);
+			} else {
+				iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
+					    tmp->arr[tmp->arr_len-1].src_len -
+					    rctx->walk.blocksize);
+			}
+			if (!final_iv_addr) {
+				unsigned long pa;
+
+				pa = (c->arr[c->arr_len-1].src_paddr +
+				      c->arr[c->arr_len-1].src_len -
+				      rctx->walk.blocksize);
+				final_iv_addr = rctx->temp_iv;
+				memcpy(rctx->temp_iv, __va(pa),
+				       rctx->walk.blocksize);
+			}
+			c->iv_paddr = iv_paddr;
+			err = __n2_crypt_chunk(tfm, c, qp, false);
+			if (err)
+				break;
+			list_del(&c->entry);
+			if (unlikely(c != &rctx->chunk))
+				kfree(c);
+		}
+	}
+	if (!err) {
+		hv_ret = wait_for_tail(qp);
+		if (hv_ret != HV_EOK)
+			err = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&qp->lock, flags);
+
+	put_cpu();
+
+out:
+	n2_chunk_complete(req, err ? NULL : final_iv_addr);
+	return err;
+}
+
+static int n2_encrypt_chaining(struct ablkcipher_request *req)
+{
+	return n2_do_chaining(req, true);
+}
+
+static int n2_decrypt_chaining(struct ablkcipher_request *req)
+{
+	return n2_do_chaining(req, false);
+}
+
+struct n2_cipher_tmpl {
+	const char		*name;
+	const char		*drv_name;
+	u8			block_size;
+	u8			enc_type;
+	struct ablkcipher_alg	ablkcipher;
+};
+
+static const struct n2_cipher_tmpl cipher_tmpls[] = {
+	/* ARC4: only ECB is supported (chaining bits ignored) */
+	{	.name		= "ecb(arc4)",
+		.drv_name	= "ecb-arc4",
+		.block_size	= 1,
+		.enc_type	= (ENC_TYPE_ALG_RC4_STREAM |
+				   ENC_TYPE_CHAINING_ECB),
+		.ablkcipher	= {
+			.min_keysize	= 1,
+			.max_keysize	= 256,
+			.setkey		= n2_arc4_setkey,
+			.encrypt	= n2_encrypt_ecb,
+			.decrypt	= n2_decrypt_ecb,
+		},
+	},
+
+	/* DES: ECB, CBC and CFB are supported */
+	{	.name		= "ecb(des)",
+		.drv_name	= "ecb-des",
+		.block_size	= DES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_DES |
+				   ENC_TYPE_CHAINING_ECB),
+		.ablkcipher	= {
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.setkey		= n2_des_setkey,
+			.encrypt	= n2_encrypt_ecb,
+			.decrypt	= n2_decrypt_ecb,
+		},
+	},
+	{	.name		= "cbc(des)",
+		.drv_name	= "cbc-des",
+		.block_size	= DES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_DES |
+				   ENC_TYPE_CHAINING_CBC),
+		.ablkcipher	= {
+			.ivsize		= DES_BLOCK_SIZE,
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.setkey		= n2_des_setkey,
+			.encrypt	= n2_encrypt_chaining,
+			.decrypt	= n2_decrypt_chaining,
+		},
+	},
+	{	.name		= "cfb(des)",
+		.drv_name	= "cfb-des",
+		.block_size	= DES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_DES |
+				   ENC_TYPE_CHAINING_CFB),
+		.ablkcipher	= {
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.setkey		= n2_des_setkey,
+			.encrypt	= n2_encrypt_chaining,
+			.decrypt	= n2_decrypt_chaining,
+		},
+	},
+
+	/* 3DES: ECB, CBC and CFB are supported */
+	{	.name		= "ecb(des3_ede)",
+		.drv_name	= "ecb-3des",
+		.block_size	= DES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_3DES |
+				   ENC_TYPE_CHAINING_ECB),
+		.ablkcipher	= {
+			.min_keysize	= 3 * DES_KEY_SIZE,
+			.max_keysize	= 3 * DES_KEY_SIZE,
+			.setkey		= n2_3des_setkey,
+			.encrypt	= n2_encrypt_ecb,
+			.decrypt	= n2_decrypt_ecb,
+		},
+	},
+	{	.name		= "cbc(des3_ede)",
+		.drv_name	= "cbc-3des",
+		.block_size	= DES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_3DES |
+				   ENC_TYPE_CHAINING_CBC),
+		.ablkcipher	= {
+			.ivsize		= DES_BLOCK_SIZE,
+			.min_keysize	= 3 * DES_KEY_SIZE,
+			.max_keysize	= 3 * DES_KEY_SIZE,
+			.setkey		= n2_3des_setkey,
+			.encrypt	= n2_encrypt_chaining,
+			.decrypt	= n2_decrypt_chaining,
+		},
+	},
+	{	.name		= "cfb(des3_ede)",
+		.drv_name	= "cfb-3des",
+		.block_size	= DES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_3DES |
+				   ENC_TYPE_CHAINING_CFB),
+		.ablkcipher	= {
+			.min_keysize	= 3 * DES_KEY_SIZE,
+			.max_keysize	= 3 * DES_KEY_SIZE,
+			.setkey		= n2_3des_setkey,
+			.encrypt	= n2_encrypt_chaining,
+			.decrypt	= n2_decrypt_chaining,
+		},
+	},
+	/* AES: ECB CBC and CTR are supported */
+	{	.name		= "ecb(aes)",
+		.drv_name	= "ecb-aes",
+		.block_size	= AES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_AES128 |
+				   ENC_TYPE_CHAINING_ECB),
+		.ablkcipher	= {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= n2_aes_setkey,
+			.encrypt	= n2_encrypt_ecb,
+			.decrypt	= n2_decrypt_ecb,
+		},
+	},
+	{	.name		= "cbc(aes)",
+		.drv_name	= "cbc-aes",
+		.block_size	= AES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_AES128 |
+				   ENC_TYPE_CHAINING_CBC),
+		.ablkcipher	= {
+			.ivsize		= AES_BLOCK_SIZE,
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= n2_aes_setkey,
+			.encrypt	= n2_encrypt_chaining,
+			.decrypt	= n2_decrypt_chaining,
+		},
+	},
+	{	.name		= "ctr(aes)",
+		.drv_name	= "ctr-aes",
+		.block_size	= AES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_AES128 |
+				   ENC_TYPE_CHAINING_COUNTER),
+		.ablkcipher	= {
+			.ivsize		= AES_BLOCK_SIZE,
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= n2_aes_setkey,
+			.encrypt	= n2_encrypt_chaining,
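+			/* CTR mode: decryption is the same operation as encryption */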
+			.decrypt	= n2_encrypt_chaining,
+		},
+	},
+
+};
+#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
+
+static LIST_HEAD(cipher_algs);
+
+struct n2_hash_tmpl {
+	const char	*name;
+	int		(*digest)(struct ahash_request *req);
+	u8		digest_size;
+	u8		block_size;
+};
+static const struct n2_hash_tmpl hash_tmpls[] = {
+	{ .name		= "md5",
+	  .digest	= n2_md5_async_digest,
+	  .digest_size	= MD5_DIGEST_SIZE,
+	  .block_size	= MD5_HMAC_BLOCK_SIZE },
+	{ .name		= "sha1",
+	  .digest	= n2_sha1_async_digest,
+	  .digest_size	= SHA1_DIGEST_SIZE,
+	  .block_size	= SHA1_BLOCK_SIZE },
+	{ .name		= "sha256",
+	  .digest	= n2_sha256_async_digest,
+	  .digest_size	= SHA256_DIGEST_SIZE,
+	  .block_size	= SHA256_BLOCK_SIZE },
+	{ .name		= "sha224",
+	  .digest	= n2_sha224_async_digest,
+	  .digest_size	= SHA224_DIGEST_SIZE,
+	  .block_size	= SHA224_BLOCK_SIZE },
+};
+#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
+
+struct n2_ahash_alg {
+	struct list_head	entry;
+	struct ahash_alg	alg;
+};
+static LIST_HEAD(ahash_algs);
+
+static int algs_registered;
+
+static void __n2_unregister_algs(void)
+{
+	struct n2_cipher_alg *cipher, *cipher_tmp;
+	struct n2_ahash_alg *alg, *alg_tmp;
+
+	list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
+		crypto_unregister_alg(&cipher->alg);
+		list_del(&cipher->entry);
+		kfree(cipher);
+	}
+	list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
+		crypto_unregister_ahash(&alg->alg);
+		list_del(&alg->entry);
+		kfree(alg);
+	}
+}
+
+static int n2_cipher_cra_init(struct crypto_tfm *tfm)
+{
+	tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
+	return 0;
+}
+
+static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
+{
+	struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+	struct crypto_alg *alg;
+	int err;
+
+	if (!p)
+		return -ENOMEM;
+
+	alg = &p->alg;
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
+	alg->cra_priority = N2_CRA_PRIORITY;
+	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+	alg->cra_blocksize = tmpl->block_size;
+	p->enc_type = tmpl->enc_type;
+	alg->cra_ctxsize = sizeof(struct n2_cipher_context);
+	alg->cra_type = &crypto_ablkcipher_type;
+	alg->cra_u.ablkcipher = tmpl->ablkcipher;
+	alg->cra_init = n2_cipher_cra_init;
+	alg->cra_module = THIS_MODULE;
+
+	list_add(&p->entry, &cipher_algs);
+	err = crypto_register_alg(alg);
+	if (err) {
+		list_del(&p->entry);
+		kfree(p);
+	}
+	return err;
+}
+
+static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
+{
+	struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+	struct hash_alg_common *halg;
+	struct crypto_alg *base;
+	struct ahash_alg *ahash;
+	int err;
+
+	if (!p)
+		return -ENOMEM;
+
+	ahash = &p->alg;
+	ahash->init = n2_hash_async_init;
+	ahash->update = n2_hash_async_update;
+	ahash->final = n2_hash_async_final;
+	ahash->finup = n2_hash_async_finup;
+	ahash->digest = tmpl->digest;
+
+	halg = &ahash->halg;
+	halg->digestsize = tmpl->digest_size;
+
+	base = &halg->base;
+	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
+	base->cra_priority = N2_CRA_PRIORITY;
+	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK;
+	base->cra_blocksize = tmpl->block_size;
+	base->cra_ctxsize = sizeof(struct n2_hash_ctx);
+	base->cra_module = THIS_MODULE;
+	base->cra_init = n2_hash_cra_init;
+	base->cra_exit = n2_hash_cra_exit;
+
+	list_add(&p->entry, &ahash_algs);
+	err = crypto_register_ahash(ahash);
+	if (err) {
+		list_del(&p->entry);
+		kfree(p);
+	}
+	return err;
+}
+
+static int __devinit n2_register_algs(void)
+{
+	int i, err = 0;
+
+	mutex_lock(&spu_lock);
+	if (algs_registered++)
+		goto out;
+
+	for (i = 0; i < NUM_HASH_TMPLS; i++) {
+		err = __n2_register_one_ahash(&hash_tmpls[i]);
+		if (err) {
+			__n2_unregister_algs();
+			goto out;
+		}
+	}
+	for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
+		err = __n2_register_one_cipher(&cipher_tmpls[i]);
+		if (err) {
+			__n2_unregister_algs();
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&spu_lock);
+	return err;
+}
+
+static void __exit n2_unregister_algs(void)
+{
+	mutex_lock(&spu_lock);
+	if (!--algs_registered)
+		__n2_unregister_algs();
+	mutex_unlock(&spu_lock);
+}
+
+/* To map CWQ queues to interrupt sources, the hypervisor API provides
+ * a devino.  This isn't very useful to us because all of the
+ * interrupts listed in the of_device node have been translated to
+ * Linux virtual IRQ cookie numbers.
+ *
+ * So we have to back-translate, going through the 'intr' and 'ino'
+ * property tables of the n2cp MDESC node, matching them against the OF
+ * 'interrupts' property entries, in order to figure out which
+ * devino goes to which already-translated IRQ.
+ */
+static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
+			     unsigned long dev_ino)
+{
+	const unsigned int *dev_intrs;
+	unsigned int intr;
+	int i;
+
+	for (i = 0; i < ip->num_intrs; i++) {
+		if (ip->ino_table[i].ino == dev_ino)
+			break;
+	}
+	if (i == ip->num_intrs)
+		return -ENODEV;
+
+	intr = ip->ino_table[i].intr;
+
+	dev_intrs = of_get_property(dev->node, "interrupts", NULL);
+	if (!dev_intrs)
+		return -ENODEV;
+
+	for (i = 0; i < dev->num_irqs; i++) {
+		if (dev_intrs[i] == intr)
+			return i;
+	}
+
+	return -ENODEV;
+}
+
+static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip,
+		       const char *irq_name, struct spu_queue *p,
+		       irq_handler_t handler)
+{
+	unsigned long herr;
+	int index;
+
+	herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
+	if (herr)
+		return -EINVAL;
+
+	index = find_devino_index(dev, ip, p->devino);
+	if (index < 0)
+		return index;
+
+	p->irq = dev->irqs[index];
+
+	sprintf(p->irq_name, "%s-%d", irq_name, index);
+
+	return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
+			   p->irq_name, p);
+}
+
+static struct kmem_cache *queue_cache[2];
+
+static void *new_queue(unsigned long q_type)
+{
+	return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
+}
+
+static void free_queue(void *p, unsigned long q_type)
+{
+	kmem_cache_free(queue_cache[q_type - 1], p);
+}
+
+static int queue_cache_init(void)
+{
+	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
+		queue_cache[HV_NCS_QTYPE_MAU - 1] =
+			kmem_cache_create("mau_queue",
+					  (MAU_NUM_ENTRIES *
+					   MAU_ENTRY_SIZE),
+					  MAU_ENTRY_SIZE, 0, NULL);
+	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
+		return -ENOMEM;
+
+	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
+		queue_cache[HV_NCS_QTYPE_CWQ - 1] =
+			kmem_cache_create("cwq_queue",
+					  (CWQ_NUM_ENTRIES *
+					   CWQ_ENTRY_SIZE),
+					  CWQ_ENTRY_SIZE, 0, NULL);
+	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
+		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void queue_cache_destroy(void)
+{
+	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+}
+
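+/* Configure the queue with the hypervisor.  The qconf call is issued with
+ * the current task temporarily bound to the CPUs that share this queue
+ * (p->sharing); the original CPU affinity is restored afterwards.
+ */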
+static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
+{
+	cpumask_var_t old_allowed;
+	unsigned long hv_ret;
+
+	if (cpumask_empty(&p->sharing))
+		return -EINVAL;
+
+	if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpumask_copy(old_allowed, &current->cpus_allowed);
+
+	set_cpus_allowed_ptr(current, &p->sharing);
+
+	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
+				 CWQ_NUM_ENTRIES, &p->qhandle);
+	if (!hv_ret)
+		sun4v_ncs_sethead_marker(p->qhandle, 0);
+
+	set_cpus_allowed_ptr(current, old_allowed);
+
+	free_cpumask_var(old_allowed);
+
+	return (hv_ret ? -EINVAL : 0);
+}
+
+static int spu_queue_setup(struct spu_queue *p)
+{
+	int err;
+
+	p->q = new_queue(p->q_type);
+	if (!p->q)
+		return -ENOMEM;
+
+	err = spu_queue_register(p, p->q_type);
+	if (err) {
+		free_queue(p->q, p->q_type);
+		p->q = NULL;
+	}
+
+	return err;
+}
+
+static void spu_queue_destroy(struct spu_queue *p)
+{
+	unsigned long hv_ret;
+
+	if (!p->q)
+		return;
+
+	hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
+
+	if (!hv_ret)
+		free_queue(p->q, p->q_type);
+}
+
+static void spu_list_destroy(struct list_head *list)
+{
+	struct spu_queue *p, *n;
+
+	list_for_each_entry_safe(p, n, list, list) {
+		int i;
+
+		for (i = 0; i < NR_CPUS; i++) {
+			if (cpu_to_cwq[i] == p)
+				cpu_to_cwq[i] = NULL;
+		}
+
+		if (p->irq) {
+			free_irq(p->irq, p);
+			p->irq = 0;
+		}
+		spu_queue_destroy(p);
+		list_del(&p->list);
+		kfree(p);
+	}
+}
+
+/* Walk the backward arcs of a CWQ 'exec-unit' node,
+ * gathering cpu membership information.
+ */
+static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
+			       struct of_device *dev,
+			       u64 node, struct spu_queue *p,
+			       struct spu_queue **table)
+{
+	u64 arc;
+
+	mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
+		u64 tgt = mdesc_arc_target(mdesc, arc);
+		const char *name = mdesc_node_name(mdesc, tgt);
+		const u64 *id;
+
+		if (strcmp(name, "cpu"))
+			continue;
+		id = mdesc_get_property(mdesc, tgt, "id", NULL);
+		if (table[*id] != NULL) {
+			dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
+				dev->node->full_name);
+			return -EINVAL;
+		}
+		cpu_set(*id, p->sharing);
+		table[*id] = p;
+	}
+	return 0;
+}
+
+/* Process an 'exec-unit' MDESC node of type 'cwq'.  */
+static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
+			    struct of_device *dev, struct mdesc_handle *mdesc,
+			    u64 node, const char *iname, unsigned long q_type,
+			    irq_handler_t handler, struct spu_queue **table)
+{
+	struct spu_queue *p;
+	int err;
+
+	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
+	if (!p) {
+		dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
+			dev->node->full_name);
+		return -ENOMEM;
+	}
+
+	cpus_clear(p->sharing);
+	spin_lock_init(&p->lock);
+	p->q_type = q_type;
+	INIT_LIST_HEAD(&p->jobs);
+	list_add(&p->list, list);
+
+	err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
+	if (err)
+		return err;
+
+	err = spu_queue_setup(p);
+	if (err)
+		return err;
+
+	return spu_map_ino(dev, ip, iname, p, handler);
+}
+
+static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev,
+			  struct spu_mdesc_info *ip, struct list_head *list,
+			  const char *exec_name, unsigned long q_type,
+			  irq_handler_t handler, struct spu_queue **table)
+{
+	int err = 0;
+	u64 node;
+
+	mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
+		const char *type;
+
+		type = mdesc_get_property(mdesc, node, "type", NULL);
+		if (!type || strcmp(type, exec_name))
+			continue;
+
+		err = handle_exec_unit(ip, list, dev, mdesc, node,
+				       exec_name, q_type, handler, table);
+		if (err) {
+			spu_list_destroy(list);
+			break;
+		}
+	}
+
+	return err;
+}
+
+static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
+				   struct spu_mdesc_info *ip)
+{
+	const u64 *intr, *ino;
+	int intr_len, ino_len;
+	int i;
+
+	intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
+	if (!intr)
+		return -ENODEV;
+
+	ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
+	if (!ino)
+		return -ENODEV;
+
+	if (intr_len != ino_len)
+		return -EINVAL;
+
+	ip->num_intrs = intr_len / sizeof(u64);
+	ip->ino_table = kzalloc((sizeof(struct ino_blob) *
+				 ip->num_intrs),
+				GFP_KERNEL);
+	if (!ip->ino_table)
+		return -ENOMEM;
+
+	for (i = 0; i < ip->num_intrs; i++) {
+		struct ino_blob *b = &ip->ino_table[i];
+		b->intr = intr[i];
+		b->ino = ino[i];
+	}
+
+	return 0;
+}
+
+static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
+					  struct of_device *dev,
+					  struct spu_mdesc_info *ip,
+					  const char *node_name)
+{
+	const unsigned int *reg;
+	u64 node;
+
+	reg = of_get_property(dev->node, "reg", NULL);
+	if (!reg)
+		return -ENODEV;
+
+	mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
+		const char *name;
+		const u64 *chdl;
+
+		name = mdesc_get_property(mdesc, node, "name", NULL);
+		if (!name || strcmp(name, node_name))
+			continue;
+		chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
+		if (!chdl || (*chdl != *reg))
+			continue;
+		ip->cfg_handle = *chdl;
+		return get_irq_props(mdesc, node, ip);
+	}
+
+	return -ENODEV;
+}
+
+static unsigned long n2_spu_hvapi_major;
+static unsigned long n2_spu_hvapi_minor;
+
+static int __devinit n2_spu_hvapi_register(void)
+{
+	int err;
+
+	n2_spu_hvapi_major = 2;
+	n2_spu_hvapi_minor = 0;
+
+	err = sun4v_hvapi_register(HV_GRP_NCS,
+				   n2_spu_hvapi_major,
+				   &n2_spu_hvapi_minor);
+
+	if (!err)
+		pr_info("Registered NCS HVAPI version %lu.%lu\n",
+			n2_spu_hvapi_major,
+			n2_spu_hvapi_minor);
+
+	return err;
+}
+
+static void n2_spu_hvapi_unregister(void)
+{
+	sun4v_hvapi_unregister(HV_GRP_NCS);
+}
+
+static int global_ref;
+
+static int __devinit grab_global_resources(void)
+{
+	int err = 0;
+
+	mutex_lock(&spu_lock);
+
+	if (global_ref++)
+		goto out;
+
+	err = n2_spu_hvapi_register();
+	if (err)
+		goto out;
+
+	err = queue_cache_init();
+	if (err)
+		goto out_hvapi_release;
+
+	err = -ENOMEM;
+	cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
+			     GFP_KERNEL);
+	if (!cpu_to_cwq)
+		goto out_queue_cache_destroy;
+
+	cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
+			     GFP_KERNEL);
+	if (!cpu_to_mau)
+		goto out_free_cwq_table;
+
+	err = 0;
+
+out:
+	if (err)
+		global_ref--;
+	mutex_unlock(&spu_lock);
+	return err;
+
+out_free_cwq_table:
+	kfree(cpu_to_cwq);
+	cpu_to_cwq = NULL;
+
+out_queue_cache_destroy:
+	queue_cache_destroy();
+
+out_hvapi_release:
+	n2_spu_hvapi_unregister();
+	goto out;
+}
+
+static void release_global_resources(void)
+{
+	mutex_lock(&spu_lock);
+	if (!--global_ref) {
+		kfree(cpu_to_cwq);
+		cpu_to_cwq = NULL;
+
+		kfree(cpu_to_mau);
+		cpu_to_mau = NULL;
+
+		queue_cache_destroy();
+		n2_spu_hvapi_unregister();
+	}
+	mutex_unlock(&spu_lock);
+}
+
+static struct n2_crypto * __devinit alloc_n2cp(void)
+{
+	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
+
+	if (np)
+		INIT_LIST_HEAD(&np->cwq_list);
+
+	return np;
+}
+
+static void free_n2cp(struct n2_crypto *np)
+{
+	if (np->cwq_info.ino_table) {
+		kfree(np->cwq_info.ino_table);
+		np->cwq_info.ino_table = NULL;
+	}
+
+	kfree(np);
+}
+
+static void __devinit n2_spu_driver_version(void)
+{
+	static int n2_spu_version_printed;
+
+	if (n2_spu_version_printed++ == 0)
+		pr_info("%s", version);
+}
+
+static int __devinit n2_crypto_probe(struct of_device *dev,
+				     const struct of_device_id *match)
+{
+	struct mdesc_handle *mdesc;
+	const char *full_name;
+	struct n2_crypto *np;
+	int err;
+
+	n2_spu_driver_version();
+
+	full_name = dev->node->full_name;
+	pr_info("Found N2CP at %s\n", full_name);
+
+	np = alloc_n2cp();
+	if (!np) {
+		dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
+			full_name);
+		return -ENOMEM;
+	}
+
+	err = grab_global_resources();
+	if (err) {
+		dev_err(&dev->dev, "%s: Unable to grab "
+			"global resources.\n", full_name);
+		goto out_free_n2cp;
+	}
+
+	mdesc = mdesc_grab();
+
+	if (!mdesc) {
+		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
+			full_name);
+		err = -ENODEV;
+		goto out_free_global;
+	}
+	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
+	if (err) {
+		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
+			full_name);
+		mdesc_release(mdesc);
+		goto out_free_global;
+	}
+
+	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
+			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
+			     cpu_to_cwq);
+	mdesc_release(mdesc);
+
+	if (err) {
+		dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
+			full_name);
+		goto out_free_global;
+	}
+
+	err = n2_register_algs();
+	if (err) {
+		dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
+			full_name);
+		goto out_free_spu_list;
+	}
+
+	dev_set_drvdata(&dev->dev, np);
+
+	return 0;
+
+out_free_spu_list:
+	spu_list_destroy(&np->cwq_list);
+
+out_free_global:
+	release_global_resources();
+
+out_free_n2cp:
+	free_n2cp(np);
+
+	return err;
+}
+
+static int __devexit n2_crypto_remove(struct of_device *dev)
+{
+	struct n2_crypto *np = dev_get_drvdata(&dev->dev);
+
+	n2_unregister_algs();
+
+	spu_list_destroy(&np->cwq_list);
+
+	release_global_resources();
+
+	free_n2cp(np);
+
+	return 0;
+}
+
+static struct n2_mau * __devinit alloc_ncp(void)
+{
+	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
+
+	if (mp)
+		INIT_LIST_HEAD(&mp->mau_list);
+
+	return mp;
+}
+
+static void free_ncp(struct n2_mau *mp)
+{
+	if (mp->mau_info.ino_table) {
+		kfree(mp->mau_info.ino_table);
+		mp->mau_info.ino_table = NULL;
+	}
+
+	kfree(mp);
+}
+
+static int __devinit n2_mau_probe(struct of_device *dev,
+				     const struct of_device_id *match)
+{
+	struct mdesc_handle *mdesc;
+	const char *full_name;
+	struct n2_mau *mp;
+	int err;
+
+	n2_spu_driver_version();
+
+	full_name = dev->node->full_name;
+	pr_info("Found NCP at %s\n", full_name);
+
+	mp = alloc_ncp();
+	if (!mp) {
+		dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
+			full_name);
+		return -ENOMEM;
+	}
+
+	err = grab_global_resources();
+	if (err) {
+		dev_err(&dev->dev, "%s: Unable to grab "
+			"global resources.\n", full_name);
+		goto out_free_ncp;
+	}
+
+	mdesc = mdesc_grab();
+
+	if (!mdesc) {
+		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
+			full_name);
+		err = -ENODEV;
+		goto out_free_global;
+	}
+
+	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
+	if (err) {
+		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
+			full_name);
+		mdesc_release(mdesc);
+		goto out_free_global;
+	}
+
+	err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
+			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
+			     cpu_to_mau);
+	mdesc_release(mdesc);
+
+	if (err) {
+		dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
+			full_name);
+		goto out_free_global;
+	}
+
+	dev_set_drvdata(&dev->dev, mp);
+
+	return 0;
+
+out_free_global:
+	release_global_resources();
+
+out_free_ncp:
+	free_ncp(mp);
+
+	return err;
+}
+
+static int __devexit n2_mau_remove(struct of_device *dev)
+{
+	struct n2_mau *mp = dev_get_drvdata(&dev->dev);
+
+	spu_list_destroy(&mp->mau_list);
+
+	release_global_resources();
+
+	free_ncp(mp);
+
+	return 0;
+}
+
+static struct of_device_id n2_crypto_match[] = {
+	{
+		.name = "n2cp",
+		.compatible = "SUNW,n2-cwq",
+	},
+	{
+		.name = "n2cp",
+		.compatible = "SUNW,vf-cwq",
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, n2_crypto_match);
+
+static struct of_platform_driver n2_crypto_driver = {
+	.name		=	"n2cp",
+	.match_table	=	n2_crypto_match,
+	.probe		=	n2_crypto_probe,
+	.remove		=	__devexit_p(n2_crypto_remove),
+};
+
+static struct of_device_id n2_mau_match[] = {
+	{
+		.name = "ncp",
+		.compatible = "SUNW,n2-mau",
+	},
+	{
+		.name = "ncp",
+		.compatible = "SUNW,vf-mau",
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, n2_mau_match);
+
+static struct of_platform_driver n2_mau_driver = {
+	.name		=	"ncp",
+	.match_table	=	n2_mau_match,
+	.probe		=	n2_mau_probe,
+	.remove		=	__devexit_p(n2_mau_remove),
+};
+
+static int __init n2_init(void)
+{
+	int err = of_register_driver(&n2_crypto_driver, &of_bus_type);
+
+	if (!err) {
+		err = of_register_driver(&n2_mau_driver, &of_bus_type);
+		if (err)
+			of_unregister_driver(&n2_crypto_driver);
+	}
+	return err;
+}
+
+static void __exit n2_exit(void)
+{
+	of_unregister_driver(&n2_mau_driver);
+	of_unregister_driver(&n2_crypto_driver);
+}
+
+module_init(n2_init);
+module_exit(n2_exit);
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h
new file mode 100644
index 0000000..4bcbbea
--- /dev/null
+++ b/drivers/crypto/n2_core.h
@@ -0,0 +1,231 @@
+#ifndef _N2_CORE_H
+#define _N2_CORE_H
+
+#ifndef __ASSEMBLY__
+
+struct ino_blob {
+	u64			intr;
+	u64			ino;
+};
+
+struct spu_mdesc_info {
+	u64			cfg_handle;
+	struct ino_blob		*ino_table;
+	int			num_intrs;
+};
+
+struct n2_crypto {
+	struct spu_mdesc_info	cwq_info;
+	struct list_head	cwq_list;
+};
+
+struct n2_mau {
+	struct spu_mdesc_info	mau_info;
+	struct list_head	mau_list;
+};
+
+#define CWQ_ENTRY_SIZE		64
+#define CWQ_NUM_ENTRIES		64
+
+#define MAU_ENTRY_SIZE		64
+#define MAU_NUM_ENTRIES		64
+
+struct cwq_initial_entry {
+	u64			control;
+	u64			src_addr;
+	u64			auth_key_addr;
+	u64			auth_iv_addr;
+	u64			final_auth_state_addr;
+	u64			enc_key_addr;
+	u64			enc_iv_addr;
+	u64			dest_addr;
+};
+
+struct cwq_ext_entry {
+	u64			len;
+	u64			src_addr;
+	u64			resv1;
+	u64			resv2;
+	u64			resv3;
+	u64			resv4;
+	u64			resv5;
+	u64			resv6;
+};
+
+struct cwq_final_entry {
+	u64			control;
+	u64			src_addr;
+	u64			resv1;
+	u64			resv2;
+	u64			resv3;
+	u64			resv4;
+	u64			resv5;
+	u64			resv6;
+};
+
+#define CONTROL_LEN			0x000000000000ffffULL
+#define CONTROL_LEN_SHIFT		0
+#define CONTROL_HMAC_KEY_LEN		0x0000000000ff0000ULL
+#define CONTROL_HMAC_KEY_LEN_SHIFT	16
+#define CONTROL_ENC_TYPE		0x00000000ff000000ULL
+#define CONTROL_ENC_TYPE_SHIFT		24
+#define  ENC_TYPE_ALG_RC4_STREAM	0x00ULL
+#define  ENC_TYPE_ALG_RC4_NOSTREAM	0x04ULL
+#define  ENC_TYPE_ALG_DES		0x08ULL
+#define  ENC_TYPE_ALG_3DES		0x0cULL
+#define  ENC_TYPE_ALG_AES128		0x10ULL
+#define  ENC_TYPE_ALG_AES192		0x14ULL
+#define  ENC_TYPE_ALG_AES256		0x18ULL
+#define  ENC_TYPE_ALG_RESERVED		0x1cULL
+#define  ENC_TYPE_ALG_MASK		0x1cULL
+#define  ENC_TYPE_CHAINING_ECB		0x00ULL
+#define  ENC_TYPE_CHAINING_CBC		0x01ULL
+#define  ENC_TYPE_CHAINING_CFB		0x02ULL
+#define  ENC_TYPE_CHAINING_COUNTER	0x03ULL
+#define  ENC_TYPE_CHAINING_MASK		0x03ULL
+#define CONTROL_AUTH_TYPE		0x0000001f00000000ULL
+#define CONTROL_AUTH_TYPE_SHIFT		32
+#define  AUTH_TYPE_RESERVED		0x00ULL
+#define  AUTH_TYPE_MD5			0x01ULL
+#define  AUTH_TYPE_SHA1			0x02ULL
+#define  AUTH_TYPE_SHA256		0x03ULL
+#define  AUTH_TYPE_CRC32		0x04ULL
+#define  AUTH_TYPE_HMAC_MD5		0x05ULL
+#define  AUTH_TYPE_HMAC_SHA1		0x06ULL
+#define  AUTH_TYPE_HMAC_SHA256		0x07ULL
+#define  AUTH_TYPE_TCP_CHECKSUM		0x08ULL
+#define  AUTH_TYPE_SSL_HMAC_MD5		0x09ULL
+#define  AUTH_TYPE_SSL_HMAC_SHA1	0x0aULL
+#define  AUTH_TYPE_SSL_HMAC_SHA256	0x0bULL
+#define CONTROL_STRAND			0x000000e000000000ULL
+#define CONTROL_STRAND_SHIFT		37
+#define CONTROL_HASH_LEN		0x0000ff0000000000ULL
+#define CONTROL_HASH_LEN_SHIFT		40
+#define CONTROL_INTERRUPT		0x0001000000000000ULL
+#define CONTROL_STORE_FINAL_AUTH_STATE	0x0002000000000000ULL
+#define CONTROL_RESERVED		0x001c000000000000ULL
+#define CONTROL_HV_DONE			0x0004000000000000ULL
+#define CONTROL_HV_PROTOCOL_ERROR	0x0008000000000000ULL
+#define CONTROL_HV_HARDWARE_ERROR	0x0010000000000000ULL
+#define CONTROL_END_OF_BLOCK		0x0020000000000000ULL
+#define CONTROL_START_OF_BLOCK		0x0040000000000000ULL
+#define CONTROL_ENCRYPT			0x0080000000000000ULL
+#define CONTROL_OPCODE			0xff00000000000000ULL
+#define CONTROL_OPCODE_SHIFT		56
+#define  OPCODE_INPLACE_BIT		0x80ULL
+#define  OPCODE_SSL_KEYBLOCK		0x10ULL
+#define  OPCODE_COPY			0x20ULL
+#define  OPCODE_ENCRYPT			0x40ULL
+#define  OPCODE_AUTH_MAC		0x41ULL
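+
+/* A control word is built by shifting each field value into place with the
+ * *_SHIFT constants above and OR-ing in the relevant single-bit flags
+ * (e.g. CONTROL_ENCRYPT, CONTROL_START_OF_BLOCK, CONTROL_END_OF_BLOCK).
+ */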
+
+#endif /* !(__ASSEMBLY__) */
+
+/* NCS v2.0 hypervisor interfaces */
+#define HV_NCS_QTYPE_MAU		0x01
+#define HV_NCS_QTYPE_CWQ		0x02
+
+/* ncs_qconf()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_NCS_QCONF
+ * ARG0:	Queue type (HV_NCS_QTYPE_{MAU,CWQ})
+ * ARG1:	Real address of queue, or handle for unconfigure
+ * ARG2:	Number of entries in queue, zero for unconfigure
+ * RET0:	status
+ * RET1:	queue handle
+ *
+ * Configure a queue in the stream processing unit.
+ *
+ * The real address given as the base must be 64-byte
+ * aligned.
+ *
+ * The queue size can range from a minimum of 2 to a maximum
+ * of 64.  The queue size must be a power of two.
+ *
+ * To unconfigure a queue, specify a length of zero and place
+ * the queue handle into ARG1.
+ *
+ * On configure success the hypervisor will set the FIRST, HEAD,
+ * and TAIL registers to the address of the first entry in the
+ * queue.  The LAST register will be set to point to the last
+ * entry in the queue.
+ */
+#define HV_FAST_NCS_QCONF		0x111
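+
+/* Illustrative use via the sun4v_ncs_qconf() wrapper declared at the end of
+ * this header, mirroring spu_queue_register() and spu_queue_destroy() (where
+ * 'queue' stands for a suitably aligned queue buffer):
+ *
+ *	unsigned long qhandle, hv_ret;
+ *
+ *	hv_ret = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, __pa(queue),
+ *				 CWQ_NUM_ENTRIES, &qhandle);
+ *	...
+ *	hv_ret = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, qhandle, 0, &qhandle);
+ */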
+
+/* ncs_qinfo()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_NCS_QINFO
+ * ARG0:	Queue handle
+ * RET0:	status
+ * RET1:	Queue type (HV_NCS_QTYPE_{MAU,CWQ})
+ * RET2:	Queue base address
+ * RET3:	Number of entries
+ */
+#define HV_FAST_NCS_QINFO		0x112
+
+/* ncs_gethead()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_NCS_GETHEAD
+ * ARG0:	Queue handle
+ * RET0:	status
+ * RET1:	queue head offset
+ */
+#define HV_FAST_NCS_GETHEAD		0x113
+
+/* ncs_gettail()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_NCS_GETTAIL
+ * ARG0:	Queue handle
+ * RET0:	status
+ * RET1:	queue tail offset
+ */
+#define HV_FAST_NCS_GETTAIL		0x114
+
+/* ncs_settail()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_NCS_SETTAIL
+ * ARG0:	Queue handle
+ * ARG1:	New tail offset
+ * RET0:	status
+ */
+#define HV_FAST_NCS_SETTAIL		0x115
+
+/* ncs_qhandle_to_devino()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_NCS_QHANDLE_TO_DEVINO
+ * ARG0:	Queue handle
+ * RET0:	status
+ * RET1:	devino
+ */
+#define HV_FAST_NCS_QHANDLE_TO_DEVINO	0x116
+
+/* ncs_sethead_marker()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_NCS_SETHEAD_MARKER
+ * ARG0:	Queue handle
+ * ARG1:	New head offset
+ * RET0:	status
+ */
+#define HV_FAST_NCS_SETHEAD_MARKER	0x117
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
+				     unsigned long queue_ra,
+				     unsigned long num_entries,
+				     unsigned long *qhandle);
+extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle,
+				     unsigned long *queue_type,
+				     unsigned long *queue_ra,
+				     unsigned long *num_entries);
+extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
+				       unsigned long *head);
+extern unsigned long sun4v_ncs_gettail(unsigned long qhandle,
+				       unsigned long *tail);
+extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
+				       unsigned long tail);
+extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
+						 unsigned long *devino);
+extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
+					      unsigned long head);
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* _N2_CORE_H */
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
new file mode 100644
index 0000000..8b03433
--- /dev/null
+++ b/drivers/crypto/omap-sham.c
@@ -0,0 +1,1259 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for OMAP SHA1/MD5 HW acceleration.
+ *
+ * Copyright (c) 2010 Nokia Corporation
+ * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from old omap-sha1-md5.c driver.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/version.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+
+#include <plat/cpu.h>
+#include <plat/dma.h>
+#include <mach/irqs.h>
+
+#define SHA_REG_DIGEST(x)		(0x00 + ((x) * 0x04))
+#define SHA_REG_DIN(x)			(0x1C + ((x) * 0x04))
+
+#define SHA1_MD5_BLOCK_SIZE		SHA1_BLOCK_SIZE
+#define MD5_DIGEST_SIZE			16
+
+#define SHA_REG_DIGCNT			0x14
+
+#define SHA_REG_CTRL			0x18
+#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
+#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
+#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
+#define SHA_REG_CTRL_ALGO		(1 << 2)
+#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
+#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)
+
+#define SHA_REG_REV			0x5C
+#define SHA_REG_REV_MAJOR		0xF0
+#define SHA_REG_REV_MINOR		0x0F
+
+#define SHA_REG_MASK			0x60
+#define SHA_REG_MASK_DMA_EN		(1 << 3)
+#define SHA_REG_MASK_IT_EN		(1 << 2)
+#define SHA_REG_MASK_SOFTRESET		(1 << 1)
+#define SHA_REG_AUTOIDLE		(1 << 0)
+
+#define SHA_REG_SYSSTATUS		0x64
+#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)
+
+#define DEFAULT_TIMEOUT_INTERVAL	HZ
+
+#define FLAGS_FIRST		0x0001
+#define FLAGS_FINUP		0x0002
+#define FLAGS_FINAL		0x0004
+#define FLAGS_FAST		0x0008
+#define FLAGS_SHA1		0x0010
+#define FLAGS_DMA_ACTIVE	0x0020
+#define FLAGS_OUTPUT_READY	0x0040
+#define FLAGS_CLEAN		0x0080
+#define FLAGS_INIT		0x0100
+#define FLAGS_CPU		0x0200
+#define FLAGS_HMAC		0x0400
+
+/* bit 16 (3rd byte): FLAGS_BUSY is a bit number used with test_and_set_bit() */
+#define FLAGS_BUSY		16
+
+#define OP_UPDATE	1
+#define OP_FINAL	2
+
+struct omap_sham_dev;
+
+struct omap_sham_reqctx {
+	struct omap_sham_dev	*dd;
+	unsigned long		flags;
+	unsigned long		op;
+
+	size_t			digcnt;
+	u8			*buffer;
+	size_t			bufcnt;
+	size_t			buflen;
+	dma_addr_t		dma_addr;
+
+	/* walk state */
+	struct scatterlist	*sg;
+	unsigned int		offset;	/* offset in current sg */
+	unsigned int		total;	/* total request */
+};
+
+struct omap_sham_hmac_ctx {
+	struct crypto_shash	*shash;
+	u8			ipad[SHA1_MD5_BLOCK_SIZE];
+	u8			opad[SHA1_MD5_BLOCK_SIZE];
+};
+
+struct omap_sham_ctx {
+	struct omap_sham_dev	*dd;
+
+	unsigned long		flags;
+
+	/* fallback stuff */
+	struct crypto_shash	*fallback;
+
+	struct omap_sham_hmac_ctx base[0];
+};
+
+#define OMAP_SHAM_QUEUE_LENGTH	1
+
+struct omap_sham_dev {
+	struct list_head	list;
+	unsigned long		phys_base;
+	struct device		*dev;
+	void __iomem		*io_base;
+	int			irq;
+	struct clk		*iclk;
+	spinlock_t		lock;
+	int			dma;
+	int			dma_lch;
+	struct tasklet_struct	done_task;
+	struct tasklet_struct	queue_task;
+
+	unsigned long		flags;
+	struct crypto_queue	queue;
+	struct ahash_request	*req;
+};
+
+struct omap_sham_drv {
+	struct list_head	dev_list;
+	spinlock_t		lock;
+	unsigned long		flags;
+};
+
+static struct omap_sham_drv sham = {
+	.dev_list = LIST_HEAD_INIT(sham.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
+};
+
+static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
+{
+	return __raw_readl(dd->io_base + offset);
+}
+
+static inline void omap_sham_write(struct omap_sham_dev *dd,
+					u32 offset, u32 value)
+{
+	__raw_writel(value, dd->io_base + offset);
+}
+
+static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
+					u32 value, u32 mask)
+{
+	u32 val;
+
+	val = omap_sham_read(dd, address);
+	val &= ~mask;
+	val |= value;
+	omap_sham_write(dd, address, val);
+}
+
+static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
+{
+	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
+
+	while (!(omap_sham_read(dd, offset) & bit)) {
+		if (time_is_before_jiffies(timeout))
+			return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static void omap_sham_copy_hash(struct ahash_request *req, int out)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	u32 *hash = (u32 *)req->result;
+	int i;
+
+	if (likely(ctx->flags & FLAGS_SHA1)) {
+		/* SHA1 results are in big endian */
+		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
+			if (out)
+				hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
+							SHA_REG_DIGEST(i)));
+			else
+				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
+							cpu_to_be32(hash[i]));
+	} else {
+		/* MD5 results are in little endian */
+		for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
+			if (out)
+				hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
+							SHA_REG_DIGEST(i)));
+			else
+				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
+							cpu_to_le32(hash[i]));
+	}
+}
+
+static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+				 int final, int dma)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+	u32 val = length << 5, mask;
+
+	if (unlikely(!ctx->digcnt)) {
+
+		clk_enable(dd->iclk);
+
+		if (!(dd->flags & FLAGS_INIT)) {
+			omap_sham_write_mask(dd, SHA_REG_MASK,
+				SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
+
+			if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
+						SHA_REG_SYSSTATUS_RESETDONE))
+				return -ETIMEDOUT;
+
+			dd->flags |= FLAGS_INIT;
+		}
+	} else {
+		omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
+	}
+
+	omap_sham_write_mask(dd, SHA_REG_MASK,
+		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
+		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
+	/*
+	 * Setting ALGO_CONST only for the first iteration
+	 * and CLOSE_HASH only for the last one.
+	 */
+	if (ctx->flags & FLAGS_SHA1)
+		val |= SHA_REG_CTRL_ALGO;
+	if (!ctx->digcnt)
+		val |= SHA_REG_CTRL_ALGO_CONST;
+	if (final)
+		val |= SHA_REG_CTRL_CLOSE_HASH;
+
+	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
+			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
+
+	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
+
+	return 0;
+}
+
+static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
+			      size_t length, int final)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+	int err, count, len32;
+	const u32 *buffer = (const u32 *)buf;
+
+	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
+						ctx->digcnt, length, final);
+
+	err = omap_sham_write_ctrl(dd, length, final, 0);
+	if (err)
+		return err;
+
+	if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
+		return -ETIMEDOUT;
+
+	ctx->digcnt += length;
+
+	if (final)
+		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+
+	len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+	for (count = 0; count < len32; count++)
+		omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);
+
+	return -EINPROGRESS;
+}
+
+static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
+			      size_t length, int final)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+	int err, len32;
+
+	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
+						ctx->digcnt, length, final);
+
+	/* flush cache entries related to our page */
+	if (dma_addr == ctx->dma_addr)
+		dma_sync_single_for_device(dd->dev, dma_addr, length,
+					   DMA_TO_DEVICE);
+
+	len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
+			1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC);
+
+	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
+				dma_addr, 0, 0);
+
+	err = omap_sham_write_ctrl(dd, length, final, 1);
+	if (err)
+		return err;
+
+	ctx->digcnt += length;
+
+	if (final)
+		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+
+	dd->flags |= FLAGS_DMA_ACTIVE;
+
+	omap_start_dma(dd->dma_lch);
+
+	return -EINPROGRESS;
+}
+
+static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
+				const u8 *data, size_t length)
+{
+	size_t count = min(length, ctx->buflen - ctx->bufcnt);
+
+	count = min(count, ctx->total);
+	if (count <= 0)
+		return 0;
+	memcpy(ctx->buffer + ctx->bufcnt, data, count);
+	ctx->bufcnt += count;
+
+	return count;
+}
+
+static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
+{
+	size_t count;
+
+	while (ctx->sg) {
+		count = omap_sham_append_buffer(ctx,
+				sg_virt(ctx->sg) + ctx->offset,
+				ctx->sg->length - ctx->offset);
+		if (!count)
+			break;
+		ctx->offset += count;
+		ctx->total -= count;
+		if (ctx->offset == ctx->sg->length) {
+			ctx->sg = sg_next(ctx->sg);
+			if (ctx->sg)
+				ctx->offset = 0;
+			else
+				ctx->total = 0;
+		}
+	}
+
+	return 0;
+}
+
+static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+	unsigned int final;
+	size_t count;
+
+	if (!ctx->total)
+		return 0;
+
+	omap_sham_append_sg(ctx);
+
+	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+
+	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
+					 ctx->bufcnt, ctx->digcnt, final);
+
+	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
+		count = ctx->bufcnt;
+		ctx->bufcnt = 0;
+		return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
+	}
+
+	return 0;
+}
+
+static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+	unsigned int length;
+
+	ctx->flags |= FLAGS_FAST;
+
+	length = min(ctx->total, sg_dma_len(ctx->sg));
+	ctx->total = length;
+
+	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
+		dev_err(dd->dev, "dma_map_sg error\n");
+		return -EINVAL;
+	}
+
+	ctx->total -= length;
+
+	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
+}
+
+static int omap_sham_update_cpu(struct omap_sham_dev *dd)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+	int bufcnt;
+
+	omap_sham_append_sg(ctx);
+	bufcnt = ctx->bufcnt;
+	ctx->bufcnt = 0;
+
+	return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
+}
+
+static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+
+	omap_stop_dma(dd->dma_lch);
+	if (ctx->flags & FLAGS_FAST)
+		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+
+	return 0;
+}
+
+static void omap_sham_cleanup(struct ahash_request *req)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_dev *dd = ctx->dd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	if (ctx->flags & FLAGS_CLEAN) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return;
+	}
+	ctx->flags |= FLAGS_CLEAN;
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (ctx->digcnt)
+		clk_disable(dd->iclk);
+
+	if (ctx->dma_addr)
+		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+				 DMA_TO_DEVICE);
+
+	if (ctx->buffer)
+		free_page((unsigned long)ctx->buffer);
+
+	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
+}
+
+static int omap_sham_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_dev *dd = NULL, *tmp;
+
+	spin_lock_bh(&sham.lock);
+	if (!tctx->dd) {
+		list_for_each_entry(tmp, &sham.dev_list, list) {
+			dd = tmp;
+			break;
+		}
+		tctx->dd = dd;
+	} else {
+		dd = tctx->dd;
+	}
+	spin_unlock_bh(&sham.lock);
+
+	ctx->dd = dd;
+
+	ctx->flags = 0;
+
+	ctx->flags |= FLAGS_FIRST;
+
+	dev_dbg(dd->dev, "init: digest size: %d\n",
+		crypto_ahash_digestsize(tfm));
+
+	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
+		ctx->flags |= FLAGS_SHA1;
+
+	ctx->bufcnt = 0;
+	ctx->digcnt = 0;
+
+	ctx->buflen = PAGE_SIZE;
+	ctx->buffer = (void *)__get_free_page(
+				(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+				GFP_KERNEL : GFP_ATOMIC);
+	if (!ctx->buffer)
+		return -ENOMEM;
+
+	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
+					DMA_TO_DEVICE);
+	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
+		free_page((unsigned long)ctx->buffer);
+		return -EINVAL;
+	}
+
+	if (tctx->flags & FLAGS_HMAC) {
+		struct omap_sham_hmac_ctx *bctx = tctx->base;
+
+		memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
+		ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
+		ctx->flags |= FLAGS_HMAC;
+	}
+
+	return 0;
+
+}
+
+static int omap_sham_update_req(struct omap_sham_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	int err;
+
+	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
+		 ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);
+
+	if (ctx->flags & FLAGS_CPU)
+		err = omap_sham_update_cpu(dd);
+	else if (ctx->flags & FLAGS_FAST)
+		err = omap_sham_update_dma_fast(dd);
+	else
+		err = omap_sham_update_dma_slow(dd);
+
+	/* wait for DMA completion before we can take more data */
+	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
+
+	return err;
+}
+
+static int omap_sham_final_req(struct omap_sham_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	int err = 0, use_dma = 1;
+
+	if (ctx->bufcnt <= 64)
+		/* faster to handle last block with cpu */
+		use_dma = 0;
+
+	if (use_dma)
+		err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
+	else
+		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
+
+	ctx->bufcnt = 0;
+
+	if (err != -EINPROGRESS)
+		omap_sham_cleanup(req);
+
+	dev_dbg(dd->dev, "final_req: err: %d\n", err);
+
+	return err;
+}
+
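+/* For HMAC, the hardware has hashed (ipad || message) into req->result; the
+ * outer hash(opad || inner-hash) is computed here with the software shash
+ * transform to complete the HMAC.
+ */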
+static int omap_sham_finish_req_hmac(struct ahash_request *req)
+{
+	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct omap_sham_hmac_ctx *bctx = tctx->base;
+	int bs = crypto_shash_blocksize(bctx->shash);
+	int ds = crypto_shash_digestsize(bctx->shash);
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(bctx->shash)];
+	} desc;
+
+	desc.shash.tfm = bctx->shash;
+	desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
+
+	return crypto_shash_init(&desc.shash) ?:
+	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
+	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
+}
+
+static void omap_sham_finish_req(struct ahash_request *req, int err)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+	if (!err) {
+		omap_sham_copy_hash(ctx->dd->req, 1);
+		if (ctx->flags & FLAGS_HMAC)
+			err = omap_sham_finish_req_hmac(req);
+	}
+
+	if (ctx->flags & FLAGS_FINAL)
+		omap_sham_cleanup(req);
+
+	clear_bit(FLAGS_BUSY, &ctx->dd->flags);
+
+	if (req->base.complete)
+		req->base.complete(&req->base, err);
+}
+
+static int omap_sham_handle_queue(struct omap_sham_dev *dd)
+{
+	struct crypto_async_request *async_req, *backlog;
+	struct omap_sham_reqctx *ctx;
+	struct ahash_request *req, *prev_req;
+	unsigned long flags;
+	int err = 0;
+
+	if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
+		return 0;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	backlog = crypto_get_backlog(&dd->queue);
+	async_req = crypto_dequeue_request(&dd->queue);
+	if (!async_req)
+		clear_bit(FLAGS_BUSY, &dd->flags);
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!async_req)
+		return 0;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	req = ahash_request_cast(async_req);
+
+	prev_req = dd->req;
+	dd->req = req;
+
+	ctx = ahash_request_ctx(req);
+
+	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+						ctx->op, req->nbytes);
+
+	if (req != prev_req && ctx->digcnt)
+		/* request has changed - restore hash */
+		omap_sham_copy_hash(req, 0);
+
+	if (ctx->op == OP_UPDATE) {
+		err = omap_sham_update_req(dd);
+		if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
+			/* no final() after finup() */
+			err = omap_sham_final_req(dd);
+	} else if (ctx->op == OP_FINAL) {
+		err = omap_sham_final_req(dd);
+	}
+
+	if (err != -EINPROGRESS) {
+		/* done_task will not finish it, so do it here */
+		omap_sham_finish_req(req, err);
+		tasklet_schedule(&dd->queue_task);
+	}
+
+	dev_dbg(dd->dev, "exit, err: %d\n", err);
+
+	return err;
+}
+
+static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct omap_sham_dev *dd = tctx->dd;
+	unsigned long flags;
+	int err;
+
+	ctx->op = op;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	err = ahash_enqueue_request(&dd->queue, req);
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	omap_sham_handle_queue(dd);
+
+	return err;
+}
+
+static int omap_sham_update(struct ahash_request *req)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+	if (!req->nbytes)
+		return 0;
+
+	ctx->total = req->nbytes;
+	ctx->sg = req->src;
+	ctx->offset = 0;
+
+	if (ctx->flags & FLAGS_FINUP) {
+		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
+			/*
+			 * OMAP HW accel works only with buffers >= 9 bytes;
+			 * we will switch to the software bypass in final(),
+			 * which sees the same request and data.
+			 */
+			omap_sham_append_sg(ctx);
+			return 0;
+		} else if (ctx->bufcnt + ctx->total <= 64) {
+			ctx->flags |= FLAGS_CPU;
+		} else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
+			/* maybe we can use the faster, single-sg DMA path */
+			int aligned = IS_ALIGNED((u32)ctx->sg->offset,
+								sizeof(u32));
+
+			if (aligned && (ctx->flags & FLAGS_FIRST))
+				/* digest: first and final */
+				ctx->flags |= FLAGS_FAST;
+
+			ctx->flags &= ~FLAGS_FIRST;
+		}
+	} else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
+		/* if not finup -> not fast */
+		omap_sham_append_sg(ctx);
+		return 0;
+	}
+
+	return omap_sham_enqueue(req, OP_UPDATE);
+}
+
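+/* Synchronously digest 'data' with the software shash transform, using an
+ * on-stack descriptor; used for requests too small for the hardware and for
+ * pre-hashing over-long HMAC keys in setkey().
+ */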
+static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
+				  const u8 *data, unsigned int len, u8 *out)
+{
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(shash)];
+	} desc;
+
+	desc.shash.tfm = shash;
+	desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_shash_digest(&desc.shash, data, len, out);
+}
+
+static int omap_sham_final_shash(struct ahash_request *req)
+{
+	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
+				      ctx->buffer, ctx->bufcnt, req->result);
+}
+
+static int omap_sham_final(struct ahash_request *req)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	int err = 0;
+
+	ctx->flags |= FLAGS_FINUP;
+
+	/* OMAP HW accel works only with buffers >= 9 */
+	/* HMAC is always >= 9 because of ipad */
+	if ((ctx->digcnt + ctx->bufcnt) < 9)
+		err = omap_sham_final_shash(req);
+	else if (ctx->bufcnt)
+		return omap_sham_enqueue(req, OP_FINAL);
+
+	omap_sham_cleanup(req);
+
+	return err;
+}
+
+static int omap_sham_finup(struct ahash_request *req)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	int err1, err2;
+
+	ctx->flags |= FLAGS_FINUP;
+
+	err1 = omap_sham_update(req);
+	if (err1 == -EINPROGRESS)
+		return err1;
+	/*
+	 * final() always has to be called to clean up resources,
+	 * even if update() failed, except for -EINPROGRESS
+	 */
+	err2 = omap_sham_final(req);
+
+	return err1 ?: err2;
+}
+
+static int omap_sham_digest(struct ahash_request *req)
+{
+	return omap_sham_init(req) ?: omap_sham_finup(req);
+}
+
+static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
+		      unsigned int keylen)
+{
+	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct omap_sham_hmac_ctx *bctx = tctx->base;
+	int bs = crypto_shash_blocksize(bctx->shash);
+	int ds = crypto_shash_digestsize(bctx->shash);
+	int err, i;
+	err = crypto_shash_setkey(tctx->fallback, key, keylen);
+	if (err)
+		return err;
+
+	if (keylen > bs) {
+		err = omap_sham_shash_digest(bctx->shash,
+				crypto_shash_get_flags(bctx->shash),
+				key, keylen, bctx->ipad);
+		if (err)
+			return err;
+		keylen = ds;
+	} else {
+		memcpy(bctx->ipad, key, keylen);
+	}
+
+	memset(bctx->ipad + keylen, 0, bs - keylen);
+	memcpy(bctx->opad, bctx->ipad, bs);
+
+	for (i = 0; i < bs; i++) {
+		bctx->ipad[i] ^= 0x36;
+		bctx->opad[i] ^= 0x5c;
+	}
+
+	return err;
+}
+
+static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
+{
+	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+	const char *alg_name = crypto_tfm_alg_name(tfm);
+
+	/* Allocate a fallback and abort if it failed. */
+	tctx->fallback = crypto_alloc_shash(alg_name, 0,
+					    CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(tctx->fallback)) {
+		pr_err("omap-sham: fallback driver '%s' "
+				"could not be loaded.\n", alg_name);
+		return PTR_ERR(tctx->fallback);
+	}
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct omap_sham_reqctx));
+
+	if (alg_base) {
+		struct omap_sham_hmac_ctx *bctx = tctx->base;
+		tctx->flags |= FLAGS_HMAC;
+		bctx->shash = crypto_alloc_shash(alg_base, 0,
+						CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(bctx->shash)) {
+			pr_err("omap-sham: base driver '%s' "
+					"could not be loaded.\n", alg_base);
+			crypto_free_shash(tctx->fallback);
+			return PTR_ERR(bctx->shash);
+		}
+
+	}
+
+	return 0;
+}
+
+static int omap_sham_cra_init(struct crypto_tfm *tfm)
+{
+	return omap_sham_cra_init_alg(tfm, NULL);
+}
+
+static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
+{
+	return omap_sham_cra_init_alg(tfm, "sha1");
+}
+
+static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
+{
+	return omap_sham_cra_init_alg(tfm, "md5");
+}
+
+static void omap_sham_cra_exit(struct crypto_tfm *tfm)
+{
+	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_shash(tctx->fallback);
+	tctx->fallback = NULL;
+
+	if (tctx->flags & FLAGS_HMAC) {
+		struct omap_sham_hmac_ctx *bctx = tctx->base;
+		crypto_free_shash(bctx->shash);
+	}
+}
+
+static struct ahash_alg algs[] = {
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.halg.digestsize	= SHA1_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "sha1",
+		.cra_driver_name	= "omap-sha1",
+		.cra_priority		= 100,
+		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+						CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA1_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
+		.cra_alignmask		= 0,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+},
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.halg.digestsize	= MD5_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "md5",
+		.cra_driver_name	= "omap-md5",
+		.cra_priority		= 100,
+		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+						CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA1_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
+		.cra_alignmask		= 0,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+},
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.setkey		= omap_sham_setkey,
+	.halg.digestsize	= SHA1_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "hmac(sha1)",
+		.cra_driver_name	= "omap-hmac-sha1",
+		.cra_priority		= 100,
+		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+						CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA1_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
+					sizeof(struct omap_sham_hmac_ctx),
+		.cra_alignmask		= 0,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_sha1_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+},
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.setkey		= omap_sham_setkey,
+	.halg.digestsize	= MD5_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "hmac(md5)",
+		.cra_driver_name	= "omap-hmac-md5",
+		.cra_priority		= 100,
+		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+						CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA1_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
+					sizeof(struct omap_sham_hmac_ctx),
+		.cra_alignmask		= 0,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_md5_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+}
+};
+
+static void omap_sham_done_task(unsigned long data)
+{
+	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+	struct ahash_request *req = dd->req;
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	int ready = 1;
+
+	if (ctx->flags & FLAGS_OUTPUT_READY) {
+		ctx->flags &= ~FLAGS_OUTPUT_READY;
+		ready = 1;
+	}
+
+	if (dd->flags & FLAGS_DMA_ACTIVE) {
+		dd->flags &= ~FLAGS_DMA_ACTIVE;
+		omap_sham_update_dma_stop(dd);
+		omap_sham_update_dma_slow(dd);
+	}
+
+	if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
+		dev_dbg(dd->dev, "update done\n");
+		/* finish current request */
+		omap_sham_finish_req(req, 0);
+		/* start new request */
+		omap_sham_handle_queue(dd);
+	}
+}
+
+static void omap_sham_queue_task(unsigned long data)
+{
+	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+
+	omap_sham_handle_queue(dd);
+}
+
+static irqreturn_t omap_sham_irq(int irq, void *dev_id)
+{
+	struct omap_sham_dev *dd = dev_id;
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+
+	if (!ctx) {
+		dev_err(dd->dev, "unknown interrupt.\n");
+		return IRQ_HANDLED;
+	}
+
+	if (unlikely(ctx->flags & FLAGS_FINAL))
+		/* final -> allow device to go to power-saving mode */
+		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
+
+	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
+				 SHA_REG_CTRL_OUTPUT_READY);
+	omap_sham_read(dd, SHA_REG_CTRL);
+
+	ctx->flags |= FLAGS_OUTPUT_READY;
+	tasklet_schedule(&dd->done_task);
+
+	return IRQ_HANDLED;
+}
+
+static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
+{
+	struct omap_sham_dev *dd = data;
+
+	if (likely(lch == dd->dma_lch))
+		tasklet_schedule(&dd->done_task);
+}
+
+static int omap_sham_dma_init(struct omap_sham_dev *dd)
+{
+	int err;
+
+	dd->dma_lch = -1;
+
+	err = omap_request_dma(dd->dma, dev_name(dd->dev),
+			omap_sham_dma_callback, dd, &dd->dma_lch);
+	if (err) {
+		dev_err(dd->dev, "Unable to request DMA channel\n");
+		return err;
+	}
+	omap_set_dma_dest_params(dd->dma_lch, 0,
+			OMAP_DMA_AMODE_CONSTANT,
+			dd->phys_base + SHA_REG_DIN(0), 0, 16);
+
+	omap_set_dma_dest_burst_mode(dd->dma_lch,
+			OMAP_DMA_DATA_BURST_16);
+
+	return 0;
+}
+
+static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
+{
+	if (dd->dma_lch >= 0) {
+		omap_free_dma(dd->dma_lch);
+		dd->dma_lch = -1;
+	}
+}
+
+static int __devinit omap_sham_probe(struct platform_device *pdev)
+{
+	struct omap_sham_dev *dd;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int err, i, j;
+
+	dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
+	if (dd == NULL) {
+		dev_err(dev, "unable to alloc data struct.\n");
+		err = -ENOMEM;
+		goto data_err;
+	}
+	dd->dev = dev;
+	platform_set_drvdata(pdev, dd);
+
+	INIT_LIST_HEAD(&dd->list);
+	spin_lock_init(&dd->lock);
+	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
+	tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
+	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
+
+	dd->irq = -1;
+
+	/* Get the base address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "no MEM resource info\n");
+		err = -ENODEV;
+		goto res_err;
+	}
+	dd->phys_base = res->start;
+
+	/* Get the DMA */
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (!res) {
+		dev_err(dev, "no DMA resource info\n");
+		err = -ENODEV;
+		goto res_err;
+	}
+	dd->dma = res->start;
+
+	/* Get the IRQ */
+	dd->irq = platform_get_irq(pdev, 0);
+	if (dd->irq < 0) {
+		dev_err(dev, "no IRQ resource info\n");
+		err = dd->irq;
+		goto res_err;
+	}
+
+	err = request_irq(dd->irq, omap_sham_irq,
+			IRQF_TRIGGER_LOW, dev_name(dev), dd);
+	if (err) {
+		dev_err(dev, "unable to request irq.\n");
+		goto res_err;
+	}
+
+	err = omap_sham_dma_init(dd);
+	if (err)
+		goto dma_err;
+
+	/* Initializing the clock */
+	dd->iclk = clk_get(dev, "ick");
+	if (!dd->iclk) {
+		dev_err(dev, "clock initialization failed.\n");
+		err = -ENODEV;
+		goto clk_err;
+	}
+
+	dd->io_base = ioremap(dd->phys_base, SZ_4K);
+	if (!dd->io_base) {
+		dev_err(dev, "can't ioremap\n");
+		err = -ENOMEM;
+		goto io_err;
+	}
+
+	clk_enable(dd->iclk);
+	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
+		(omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
+		omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
+	clk_disable(dd->iclk);
+
+	spin_lock(&sham.lock);
+	list_add_tail(&dd->list, &sham.dev_list);
+	spin_unlock(&sham.lock);
+
+	for (i = 0; i < ARRAY_SIZE(algs); i++) {
+		err = crypto_register_ahash(&algs[i]);
+		if (err)
+			goto err_algs;
+	}
+
+	return 0;
+
+err_algs:
+	for (j = 0; j < i; j++)
+		crypto_unregister_ahash(&algs[j]);
+	iounmap(dd->io_base);
+io_err:
+	clk_put(dd->iclk);
+clk_err:
+	omap_sham_dma_cleanup(dd);
+dma_err:
+	if (dd->irq >= 0)
+		free_irq(dd->irq, dd);
+res_err:
+	kfree(dd);
+	dd = NULL;
+data_err:
+	dev_err(dev, "initialization failed.\n");
+
+	return err;
+}
+
+static int __devexit omap_sham_remove(struct platform_device *pdev)
+{
+	struct omap_sham_dev *dd;
+	int i;
+
+	dd = platform_get_drvdata(pdev);
+	if (!dd)
+		return -ENODEV;
+	spin_lock(&sham.lock);
+	list_del(&dd->list);
+	spin_unlock(&sham.lock);
+	for (i = 0; i < ARRAY_SIZE(algs); i++)
+		crypto_unregister_ahash(&algs[i]);
+	tasklet_kill(&dd->done_task);
+	tasklet_kill(&dd->queue_task);
+	iounmap(dd->io_base);
+	clk_put(dd->iclk);
+	omap_sham_dma_cleanup(dd);
+	if (dd->irq >= 0)
+		free_irq(dd->irq, dd);
+	kfree(dd);
+	dd = NULL;
+
+	return 0;
+}
+
+static struct platform_driver omap_sham_driver = {
+	.probe	= omap_sham_probe,
+	.remove	= omap_sham_remove,
+	.driver	= {
+		.name	= "omap-sham",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init omap_sham_mod_init(void)
+{
+	pr_info("loading %s driver\n", "omap-sham");
+
+	if (!cpu_class_is_omap2() ||
+		omap_type() != OMAP2_DEVICE_TYPE_SEC) {
+		pr_err("Unsupported cpu\n");
+		return -ENODEV;
+	}
+
+	return platform_driver_register(&omap_sham_driver);
+}
+
+static void __exit omap_sham_mod_exit(void)
+{
+	platform_driver_unregister(&omap_sham_driver);
+}
+
+module_init(omap_sham_mod_init);
+module_exit(omap_sham_mod_exit);
+
+MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Dmitry Kasatkin");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index dc558a0..6a0f59d 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1,7 +1,7 @@
 /*
  * talitos - Freescale Integrated Security Engine (SEC) device driver
  *
- * Copyright (c) 2008 Freescale Semiconductor, Inc.
+ * Copyright (c) 2008-2010 Freescale Semiconductor, Inc.
  *
  * Scatterlist Crypto API glue code copied from files with the following:
  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
@@ -43,9 +43,12 @@
 #include <crypto/aes.h>
 #include <crypto/des.h>
 #include <crypto/sha.h>
+#include <crypto/md5.h>
 #include <crypto/aead.h>
 #include <crypto/authenc.h>
 #include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
 #include <crypto/scatterwalk.h>
 
 #include "talitos.h"
@@ -65,6 +68,13 @@
 	__be32 ptr;	/* address */
 };
 
+static const struct talitos_ptr zero_entry = {
+	.len = 0,
+	.j_extent = 0,
+	.eptr = 0,
+	.ptr = 0
+};
+
 /* descriptor */
 struct talitos_desc {
 	__be32 hdr;			/* header high bits */
@@ -146,6 +156,7 @@
 /* .features flag */
 #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
 #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
+#define TALITOS_FTR_SHA224_HWINIT 0x00000004
 
 static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
 {
@@ -692,7 +703,7 @@
 #define TALITOS_MAX_KEY_SIZE		64
 #define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
 
-#define MD5_DIGEST_SIZE   16
+#define MD5_BLOCK_SIZE    64
 
 struct talitos_ctx {
 	struct device *dev;
@@ -705,6 +716,23 @@
 	unsigned int authsize;
 };
 
+#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
+#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
+
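+/* per-request hash state: running byte count, h/w context and buffered partial blocks */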
+struct talitos_ahash_req_ctx {
+	u64 count;
+	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
+	unsigned int hw_context_size;
+	u8 buf[HASH_MAX_BLOCK_SIZE];
+	u8 bufnext[HASH_MAX_BLOCK_SIZE];
+	unsigned int swinit;
+	unsigned int first;
+	unsigned int last;
+	unsigned int to_hash_later;
+	struct scatterlist bufsl[2];
+	struct scatterlist *psrc;
+};
+
 static int aead_setauthsize(struct crypto_aead *authenc,
 			    unsigned int authsize)
 {
@@ -821,10 +849,14 @@
 		else
 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
 
-		if (edesc->dst_is_chained)
-			talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
-		else
-			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+		if (dst) {
+			if (edesc->dst_is_chained)
+				talitos_unmap_sg_chain(dev, dst,
+						       DMA_FROM_DEVICE);
+			else
+				dma_unmap_sg(dev, dst, dst_nents,
+					     DMA_FROM_DEVICE);
+		}
 	} else
 		if (edesc->src_is_chained)
 			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
@@ -1114,12 +1146,67 @@
 	return sg_nents;
 }
 
+/**
+ * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
+ * @sgl:		 The SG list
+ * @nents:		 Number of SG entries
+ * @buf:		 Where to copy to
+ * @buflen:		 The number of bytes to copy
+ * @skip:		 The number of bytes to skip before copying.
+ *                       Note: skip + buflen should equal SG total size.
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
+				    void *buf, size_t buflen, unsigned int skip)
+{
+	unsigned int offset = 0;
+	unsigned int boffset = 0;
+	struct sg_mapping_iter miter;
+	unsigned long flags;
+	unsigned int sg_flags = SG_MITER_ATOMIC;
+	size_t total_buffer = buflen + skip;
+
+	sg_flags |= SG_MITER_FROM_SG;
+
+	sg_miter_start(&miter, sgl, nents, sg_flags);
+
+	local_irq_save(flags);
+
+	while (sg_miter_next(&miter) && offset < total_buffer) {
+		unsigned int len;
+		unsigned int ignore;
+
+		if ((offset + miter.length) > skip) {
+			if (offset < skip) {
+				/* Copy part of this segment */
+				ignore = skip - offset;
+				len = miter.length - ignore;
+				memcpy(buf + boffset, miter.addr + ignore, len);
+			} else {
+				/* Copy all of this segment */
+				len = miter.length;
+				memcpy(buf + boffset, miter.addr, len);
+			}
+			boffset += len;
+		}
+		offset += miter.length;
+	}
+
+	sg_miter_stop(&miter);
+
+	local_irq_restore(flags);
+	return boffset;
+}
+
 /*
  * allocate and map the extended descriptor
  */
 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 						 struct scatterlist *src,
 						 struct scatterlist *dst,
+						 int hash_result,
 						 unsigned int cryptlen,
 						 unsigned int authsize,
 						 int icv_stashing,
@@ -1139,11 +1226,16 @@
 	src_nents = sg_count(src, cryptlen + authsize, &src_chained);
 	src_nents = (src_nents == 1) ? 0 : src_nents;
 
-	if (dst == src) {
-		dst_nents = src_nents;
+	if (hash_result) {
+		dst_nents = 0;
 	} else {
-		dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
-		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+		if (dst == src) {
+			dst_nents = src_nents;
+		} else {
+			dst_nents = sg_count(dst, cryptlen + authsize,
+					     &dst_chained);
+			dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+		}
 	}
 
 	/*
@@ -1172,8 +1264,10 @@
 	edesc->src_is_chained = src_chained;
 	edesc->dst_is_chained = dst_chained;
 	edesc->dma_len = dma_len;
-	edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
-					     edesc->dma_len, DMA_BIDIRECTIONAL);
+	if (dma_len)
+		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
+						     edesc->dma_len,
+						     DMA_BIDIRECTIONAL);
 
 	return edesc;
 }
@@ -1184,7 +1278,7 @@
 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 
-	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
 				   areq->cryptlen, ctx->authsize, icv_stashing,
 				   areq->base.flags);
 }
@@ -1441,8 +1535,8 @@
 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
 
-	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
-				   0, 0, areq->base.flags);
+	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
+				   areq->nbytes, 0, 0, areq->base.flags);
 }
 
 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1478,15 +1572,329 @@
 	return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
 }
 
+static void common_nonsnoop_hash_unmap(struct device *dev,
+				       struct talitos_edesc *edesc,
+				       struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+
+	/* When using hashctx-in, must unmap it. */
+	if (edesc->desc.ptr[1].len)
+		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
+					 DMA_TO_DEVICE);
+
+	if (edesc->desc.ptr[2].len)
+		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
+					 DMA_TO_DEVICE);
+
+	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
+
+	if (edesc->dma_len)
+		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+				 DMA_BIDIRECTIONAL);
+
+}
+
+static void ahash_done(struct device *dev,
+		       struct talitos_desc *desc, void *context,
+		       int err)
+{
+	struct ahash_request *areq = context;
+	struct talitos_edesc *edesc =
+		 container_of(desc, struct talitos_edesc, desc);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	if (!req_ctx->last && req_ctx->to_hash_later) {
+		/* Position any partial block for next update/final/finup */
+		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
+	}
+	common_nonsnoop_hash_unmap(dev, edesc, areq);
+
+	kfree(edesc);
+
+	areq->base.complete(&areq->base, err);
+}
+
+static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+				struct ahash_request *areq, unsigned int length,
+				void (*callback) (struct device *dev,
+						  struct talitos_desc *desc,
+						  void *context, int error))
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct device *dev = ctx->dev;
+	struct talitos_desc *desc = &edesc->desc;
+	int sg_count, ret;
+
+	/* first DWORD empty */
+	desc->ptr[0] = zero_entry;
+
+	/* hash context in */
+	if (!req_ctx->first || req_ctx->swinit) {
+		map_single_talitos_ptr(dev, &desc->ptr[1],
+				       req_ctx->hw_context_size,
+				       (char *)req_ctx->hw_context, 0,
+				       DMA_TO_DEVICE);
+		req_ctx->swinit = 0;
+	} else {
+		desc->ptr[1] = zero_entry;
+		/* Indicate next op is not the first. */
+		req_ctx->first = 0;
+	}
+
+	/* HMAC key */
+	if (ctx->keylen)
+		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
+				       (char *)&ctx->key, 0, DMA_TO_DEVICE);
+	else
+		desc->ptr[2] = zero_entry;
+
+	/*
+	 * data in
+	 */
+	desc->ptr[3].len = cpu_to_be16(length);
+	desc->ptr[3].j_extent = 0;
+
+	sg_count = talitos_map_sg(dev, req_ctx->psrc,
+				  edesc->src_nents ? : 1,
+				  DMA_TO_DEVICE,
+				  edesc->src_is_chained);
+
+	if (sg_count == 1) {
+		to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
+	} else {
+		sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
+					  &edesc->link_tbl[0]);
+		if (sg_count > 1) {
+			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
+			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
+			dma_sync_single_for_device(ctx->dev,
+						   edesc->dma_link_tbl,
+						   edesc->dma_len,
+						   DMA_BIDIRECTIONAL);
+		} else {
+			/* Only one segment now, so no link tbl needed */
+			to_talitos_ptr(&desc->ptr[3],
+				       sg_dma_address(req_ctx->psrc));
+		}
+	}
+
+	/* fifth DWORD empty */
+	desc->ptr[4] = zero_entry;
+
+	/* hash/HMAC out -or- hash context out */
+	if (req_ctx->last)
+		map_single_talitos_ptr(dev, &desc->ptr[5],
+				       crypto_ahash_digestsize(tfm),
+				       areq->result, 0, DMA_FROM_DEVICE);
+	else
+		map_single_talitos_ptr(dev, &desc->ptr[5],
+				       req_ctx->hw_context_size,
+				       req_ctx->hw_context, 0, DMA_FROM_DEVICE);
+
+	/* last DWORD empty */
+	desc->ptr[6] = zero_entry;
+
+	ret = talitos_submit(dev, desc, callback, areq);
+	if (ret != -EINPROGRESS) {
+		common_nonsnoop_hash_unmap(dev, edesc, areq);
+		kfree(edesc);
+	}
+	return ret;
+}
+
+static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
+					       unsigned int nbytes)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
+				   nbytes, 0, 0, areq->base.flags);
+}
+
+static int ahash_init(struct ahash_request *areq)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	/* Initialize the context */
+	req_ctx->count = 0;
+	req_ctx->first = 1; /* first indicates h/w must init its context */
+	req_ctx->swinit = 0; /* assume h/w init of context */
+	req_ctx->hw_context_size =
+		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+
+	return 0;
+}
+
+/*
+ * on h/w without explicit sha224 support, we initialize h/w context
+ * manually with sha224 constants, and tell it to run sha256.
+ */
+static int ahash_init_sha224_swinit(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	ahash_init(areq);
+	req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
+
+	req_ctx->hw_context[0] = cpu_to_be32(SHA224_H0);
+	req_ctx->hw_context[1] = cpu_to_be32(SHA224_H1);
+	req_ctx->hw_context[2] = cpu_to_be32(SHA224_H2);
+	req_ctx->hw_context[3] = cpu_to_be32(SHA224_H3);
+	req_ctx->hw_context[4] = cpu_to_be32(SHA224_H4);
+	req_ctx->hw_context[5] = cpu_to_be32(SHA224_H5);
+	req_ctx->hw_context[6] = cpu_to_be32(SHA224_H6);
+	req_ctx->hw_context[7] = cpu_to_be32(SHA224_H7);
+
+	/* init 64-bit count */
+	req_ctx->hw_context[8] = 0;
+	req_ctx->hw_context[9] = 0;
+
+	return 0;
+}
+
+static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct talitos_edesc *edesc;
+	unsigned int blocksize =
+			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+	unsigned int nbytes_to_hash;
+	unsigned int to_hash_later;
+	unsigned int index;
+	int chained;
+
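+	/* index = bytes already buffered in req_ctx->buf from a previous update */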
+	index = req_ctx->count & (blocksize - 1);
+	req_ctx->count += nbytes;
+
+	if (!req_ctx->last && (index + nbytes) < blocksize) {
+		/* Buffer the partial block */
+		sg_copy_to_buffer(areq->src,
+				  sg_count(areq->src, nbytes, &chained),
+				  req_ctx->buf + index, nbytes);
+		return 0;
+	}
+
+	if (index) {
+		/* partial block from previous update; chain it in. */
+		sg_init_table(req_ctx->bufsl, (nbytes) ? 2 : 1);
+		sg_set_buf(req_ctx->bufsl, req_ctx->buf, index);
+		if (nbytes)
+			scatterwalk_sg_chain(req_ctx->bufsl, 2,
+					     areq->src);
+		req_ctx->psrc = req_ctx->bufsl;
+	} else {
+		req_ctx->psrc = areq->src;
+	}
+	nbytes_to_hash = index + nbytes;
+	if (!req_ctx->last) {
+		to_hash_later = (nbytes_to_hash & (blocksize - 1));
+		if (to_hash_later) {
+			int nents;
+			/* Must copy to_hash_later bytes from the end
+			 * to bufnext (a partial block) for later.
+			 */
+			nents = sg_count(areq->src, nbytes, &chained);
+			sg_copy_end_to_buffer(areq->src, nents,
+					      req_ctx->bufnext,
+					      to_hash_later,
+					      nbytes - to_hash_later);
+
+			/* Adjust count for what will be hashed now */
+			nbytes_to_hash -= to_hash_later;
+		}
+		req_ctx->to_hash_later = to_hash_later;
+	}
+
+	/* allocate extended descriptor */
+	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	edesc->desc.hdr = ctx->desc_hdr_template;
+
+	/* On last one, request SEC to pad; otherwise continue */
+	if (req_ctx->last)
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
+	else
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
+
+	/* request SEC to INIT hash. */
+	if (req_ctx->first && !req_ctx->swinit)
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
+
+	/* When the tfm context has a keylen, it's an HMAC.
+	 * A first or last (ie. not middle) descriptor must request HMAC.
+	 */
+	if (ctx->keylen && (req_ctx->first || req_ctx->last))
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
+
+	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
+				    ahash_done);
+}
+
+static int ahash_update(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	req_ctx->last = 0;
+
+	return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_final(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	req_ctx->last = 1;
+
+	return ahash_process_req(areq, 0);
+}
+
+static int ahash_finup(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	req_ctx->last = 1;
+
+	return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_digest(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+
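+	/* one-shot digest: initialize the state, then hash and finalize the whole request */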
+	ahash->init(areq);
+	req_ctx->last = 1;
+
+	return ahash_process_req(areq, areq->nbytes);
+}
+
 struct talitos_alg_template {
-	struct crypto_alg alg;
+	u32 type;
+	union {
+		struct crypto_alg crypto;
+		struct ahash_alg hash;
+	} alg;
 	__be32 desc_hdr_template;
 };
 
 static struct talitos_alg_template driver_algs[] = {
 	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.crypto = {
 			.cra_name = "authenc(hmac(sha1),cbc(aes))",
 			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
 			.cra_blocksize = AES_BLOCK_SIZE,
@@ -1511,8 +1919,8 @@
 		                     DESC_HDR_MODE1_MDEU_PAD |
 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
 	},
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.crypto = {
 			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1538,8 +1946,8 @@
 		                     DESC_HDR_MODE1_MDEU_PAD |
 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
 	},
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.crypto = {
 			.cra_name = "authenc(hmac(sha256),cbc(aes))",
 			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
 			.cra_blocksize = AES_BLOCK_SIZE,
@@ -1564,8 +1972,8 @@
 		                     DESC_HDR_MODE1_MDEU_PAD |
 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
 	},
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.crypto = {
 			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1591,8 +1999,8 @@
 		                     DESC_HDR_MODE1_MDEU_PAD |
 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
 	},
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.crypto = {
 			.cra_name = "authenc(hmac(md5),cbc(aes))",
 			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
 			.cra_blocksize = AES_BLOCK_SIZE,
@@ -1617,8 +2025,8 @@
 		                     DESC_HDR_MODE1_MDEU_PAD |
 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
 	},
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.crypto = {
 			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1645,8 +2053,8 @@
 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
 	},
 	/* ABLKCIPHER algorithms. */
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
 			.cra_name = "cbc(aes)",
 			.cra_driver_name = "cbc-aes-talitos",
 			.cra_blocksize = AES_BLOCK_SIZE,
@@ -1667,8 +2075,8 @@
 				     DESC_HDR_SEL0_AESU |
 				     DESC_HDR_MODE0_AESU_CBC,
 	},
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
 			.cra_name = "cbc(des3_ede)",
 			.cra_driver_name = "cbc-3des-talitos",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1689,14 +2097,140 @@
 			             DESC_HDR_SEL0_DEU |
 		                     DESC_HDR_MODE0_DEU_CBC |
 		                     DESC_HDR_MODE0_DEU_3DES,
-	}
+	},
+	/* AHASH algorithms. */
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.halg.digestsize = MD5_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "md5",
+				.cra_driver_name = "md5-talitos",
+				.cra_blocksize = MD5_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_MD5,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.halg.digestsize = SHA1_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha1",
+				.cra_driver_name = "sha1-talitos",
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_SHA1,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.halg.digestsize = SHA224_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha224",
+				.cra_driver_name = "sha224-talitos",
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_SHA224,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.halg.digestsize = SHA256_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha256",
+				.cra_driver_name = "sha256-talitos",
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_SHA256,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.halg.digestsize = SHA384_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha384",
+				.cra_driver_name = "sha384-talitos",
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUB |
+				     DESC_HDR_MODE0_MDEUB_SHA384,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.halg.digestsize = SHA512_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha512",
+				.cra_driver_name = "sha512-talitos",
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUB |
+				     DESC_HDR_MODE0_MDEUB_SHA512,
+	},
 };
 
 struct talitos_crypto_alg {
 	struct list_head entry;
 	struct device *dev;
-	__be32 desc_hdr_template;
-	struct crypto_alg crypto_alg;
+	struct talitos_alg_template algt;
 };
 
 static int talitos_cra_init(struct crypto_tfm *tfm)
@@ -1705,13 +2239,28 @@
 	struct talitos_crypto_alg *talitos_alg;
 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	talitos_alg =  container_of(alg, struct talitos_crypto_alg, crypto_alg);
+	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+		talitos_alg = container_of(__crypto_ahash_alg(alg),
+					   struct talitos_crypto_alg,
+					   algt.alg.hash);
+	else
+		talitos_alg = container_of(alg, struct talitos_crypto_alg,
+					   algt.alg.crypto);
 
 	/* update context with ptr to dev */
 	ctx->dev = talitos_alg->dev;
 
 	/* copy descriptor header template value */
-	ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
+	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
+
+	return 0;
+}
+
+static int talitos_cra_init_aead(struct crypto_tfm *tfm)
+{
+	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	talitos_cra_init(tfm);
 
 	/* random first IV */
 	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
@@ -1719,6 +2268,19 @@
 	return 0;
 }
 
+static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
+{
+	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	talitos_cra_init(tfm);
+
+	ctx->keylen = 0;
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct talitos_ahash_req_ctx));
+
+	return 0;
+}
+
 /*
  * given the alg's descriptor header template, determine whether descriptor
  * type and primary/secondary execution units required match the hw
@@ -1747,7 +2309,15 @@
 	int i;
 
 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
-		crypto_unregister_alg(&t_alg->crypto_alg);
+		switch (t_alg->algt.type) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		case CRYPTO_ALG_TYPE_AEAD:
+			crypto_unregister_alg(&t_alg->algt.alg.crypto);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			crypto_unregister_ahash(&t_alg->algt.alg.hash);
+			break;
+		}
 		list_del(&t_alg->entry);
 		kfree(t_alg);
 	}
@@ -1781,6 +2351,7 @@
 						    struct talitos_alg_template
 						           *template)
 {
+	struct talitos_private *priv = dev_get_drvdata(dev);
 	struct talitos_crypto_alg *t_alg;
 	struct crypto_alg *alg;
 
@@ -1788,16 +2359,36 @@
 	if (!t_alg)
 		return ERR_PTR(-ENOMEM);
 
-	alg = &t_alg->crypto_alg;
-	*alg = template->alg;
+	t_alg->algt = *template;
+
+	switch (t_alg->algt.type) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		alg = &t_alg->algt.alg.crypto;
+		alg->cra_init = talitos_cra_init;
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+		alg = &t_alg->algt.alg.crypto;
+		alg->cra_init = talitos_cra_init_aead;
+		break;
+	case CRYPTO_ALG_TYPE_AHASH:
+		alg = &t_alg->algt.alg.hash.halg.base;
+		alg->cra_init = talitos_cra_init_ahash;
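+		/* no SHA-224 h/w init: seed the context in s/w and run it as SHA-256 */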
+		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
+		    !strcmp(alg->cra_name, "sha224")) {
+			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
+			t_alg->algt.desc_hdr_template =
+					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+					DESC_HDR_SEL0_MDEUA |
+					DESC_HDR_MODE0_MDEU_SHA256;
+		}
+		break;
+	}
 
 	alg->cra_module = THIS_MODULE;
-	alg->cra_init = talitos_cra_init;
 	alg->cra_priority = TALITOS_CRA_PRIORITY;
 	alg->cra_alignmask = 0;
 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
 
-	t_alg->desc_hdr_template = template->desc_hdr_template;
 	t_alg->dev = dev;
 
 	return t_alg;
@@ -1877,7 +2468,8 @@
 		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
 
 	if (of_device_is_compatible(np, "fsl,sec2.1"))
-		priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
+		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
+				  TALITOS_FTR_SHA224_HWINIT;
 
 	priv->chan = kzalloc(sizeof(struct talitos_channel) *
 			     priv->num_channels, GFP_KERNEL);
@@ -1931,6 +2523,7 @@
 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
 		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
 			struct talitos_crypto_alg *t_alg;
+			char *name = NULL;
 
 			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
 			if (IS_ERR(t_alg)) {
@@ -1938,15 +2531,27 @@
 				goto err_out;
 			}
 
-			err = crypto_register_alg(&t_alg->crypto_alg);
+			switch (t_alg->algt.type) {
+			case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			case CRYPTO_ALG_TYPE_AEAD:
+				err = crypto_register_alg(
+						&t_alg->algt.alg.crypto);
+				name = t_alg->algt.alg.crypto.cra_driver_name;
+				break;
+			case CRYPTO_ALG_TYPE_AHASH:
+				err = crypto_register_ahash(
+						&t_alg->algt.alg.hash);
+				name =
+				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
+				break;
+			}
 			if (err) {
 				dev_err(dev, "%s alg registration failed\n",
-					t_alg->crypto_alg.cra_driver_name);
+					name);
 				kfree(t_alg);
 			} else {
 				list_add_tail(&t_alg->entry, &priv->alg_list);
-				dev_info(dev, "%s\n",
-					 t_alg->crypto_alg.cra_driver_name);
+				dev_info(dev, "%s\n", name);
 			}
 		}
 	}
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index ff5a145..0b746ac 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -1,7 +1,7 @@
 /*
  * Freescale SEC (talitos) device register and descriptor header defines
  *
- * Copyright (c) 2006-2008 Freescale Semiconductor, Inc.
+ * Copyright (c) 2006-2010 Freescale Semiconductor, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -130,6 +130,9 @@
 #define TALITOS_CRCUISR			0xf030 /* cyclic redundancy check unit*/
 #define TALITOS_CRCUISR_LO		0xf034
 
+#define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256	0x28
+#define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512		0x48
+
 /*
  * talitos descriptor header (hdr) bits
  */
@@ -157,12 +160,16 @@
 #define	DESC_HDR_MODE0_AESU_CBC		cpu_to_be32(0x00200000)
 #define	DESC_HDR_MODE0_DEU_CBC		cpu_to_be32(0x00400000)
 #define	DESC_HDR_MODE0_DEU_3DES		cpu_to_be32(0x00200000)
+#define	DESC_HDR_MODE0_MDEU_CONT	cpu_to_be32(0x08000000)
 #define	DESC_HDR_MODE0_MDEU_INIT	cpu_to_be32(0x01000000)
 #define	DESC_HDR_MODE0_MDEU_HMAC	cpu_to_be32(0x00800000)
 #define	DESC_HDR_MODE0_MDEU_PAD		cpu_to_be32(0x00400000)
+#define	DESC_HDR_MODE0_MDEU_SHA224	cpu_to_be32(0x00300000)
 #define	DESC_HDR_MODE0_MDEU_MD5		cpu_to_be32(0x00200000)
 #define	DESC_HDR_MODE0_MDEU_SHA256	cpu_to_be32(0x00100000)
 #define	DESC_HDR_MODE0_MDEU_SHA1	cpu_to_be32(0x00000000)
+#define	DESC_HDR_MODE0_MDEUB_SHA384	cpu_to_be32(0x00000000)
+#define	DESC_HDR_MODE0_MDEUB_SHA512	cpu_to_be32(0x00200000)
 #define	DESC_HDR_MODE0_MDEU_MD5_HMAC	(DESC_HDR_MODE0_MDEU_MD5 | \
 					 DESC_HDR_MODE0_MDEU_HMAC)
 #define	DESC_HDR_MODE0_MDEU_SHA256_HMAC	(DESC_HDR_MODE0_MDEU_SHA256 | \
@@ -181,9 +188,12 @@
 #define	DESC_HDR_MODE1_MDEU_INIT	cpu_to_be32(0x00001000)
 #define	DESC_HDR_MODE1_MDEU_HMAC	cpu_to_be32(0x00000800)
 #define	DESC_HDR_MODE1_MDEU_PAD		cpu_to_be32(0x00000400)
+#define	DESC_HDR_MODE1_MDEU_SHA224	cpu_to_be32(0x00000300)
 #define	DESC_HDR_MODE1_MDEU_MD5		cpu_to_be32(0x00000200)
 #define	DESC_HDR_MODE1_MDEU_SHA256	cpu_to_be32(0x00000100)
 #define	DESC_HDR_MODE1_MDEU_SHA1	cpu_to_be32(0x00000000)
+#define	DESC_HDR_MODE1_MDEUB_SHA384	cpu_to_be32(0x00000000)
+#define	DESC_HDR_MODE1_MDEUB_SHA512	cpu_to_be32(0x00000200)
 #define	DESC_HDR_MODE1_MDEU_MD5_HMAC	(DESC_HDR_MODE1_MDEU_MD5 | \
 					 DESC_HDR_MODE1_MDEU_HMAC)
 #define	DESC_HDR_MODE1_MDEU_SHA256_HMAC	(DESC_HDR_MODE1_MDEU_SHA256 | \
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index fb09bb3..aa9bc9e 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -149,7 +149,7 @@
 	return count;
 }
 
-static ssize_t smi_data_read(struct kobject *kobj,
+static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
 			     struct bin_attribute *bin_attr,
 			     char *buf, loff_t pos, size_t count)
 {
@@ -162,7 +162,7 @@
 	return ret;
 }
 
-static ssize_t smi_data_write(struct kobject *kobj,
+static ssize_t smi_data_write(struct file *filp, struct kobject *kobj,
 			      struct bin_attribute *bin_attr,
 			      char *buf, loff_t pos, size_t count)
 {
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index 3a44602..2f452f1 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -522,7 +522,7 @@
 			rbu_data.image_update_buffer, rbu_data.bios_image_size);
 }
 
-static ssize_t read_rbu_data(struct kobject *kobj,
+static ssize_t read_rbu_data(struct file *filp, struct kobject *kobj,
 			     struct bin_attribute *bin_attr,
 			     char *buffer, loff_t pos, size_t count)
 {
@@ -576,7 +576,7 @@
 	release_firmware(fw);
 }
 
-static ssize_t read_rbu_image_type(struct kobject *kobj,
+static ssize_t read_rbu_image_type(struct file *filp, struct kobject *kobj,
 				   struct bin_attribute *bin_attr,
 				   char *buffer, loff_t pos, size_t count)
 {
@@ -586,7 +586,7 @@
 	return size;
 }
 
-static ssize_t write_rbu_image_type(struct kobject *kobj,
+static ssize_t write_rbu_image_type(struct file *filp, struct kobject *kobj,
 				    struct bin_attribute *bin_attr,
 				    char *buffer, loff_t pos, size_t count)
 {
@@ -647,7 +647,7 @@
 	return rc;
 }
 
-static ssize_t read_rbu_packet_size(struct kobject *kobj,
+static ssize_t read_rbu_packet_size(struct file *filp, struct kobject *kobj,
 				    struct bin_attribute *bin_attr,
 				    char *buffer, loff_t pos, size_t count)
 {
@@ -660,7 +660,7 @@
 	return size;
 }
 
-static ssize_t write_rbu_packet_size(struct kobject *kobj,
+static ssize_t write_rbu_packet_size(struct file *filp, struct kobject *kobj,
 				     struct bin_attribute *bin_attr,
 				     char *buffer, loff_t pos, size_t count)
 {
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 81b70bd..2a62ec6 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -402,7 +402,7 @@
 }
 
 
-static ssize_t efivar_create(struct kobject *kobj,
+static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
 			     struct bin_attribute *bin_attr,
 			     char *buf, loff_t pos, size_t count)
 {
@@ -461,7 +461,7 @@
 	return count;
 }
 
-static ssize_t efivar_delete(struct kobject *kobj,
+static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
 			     struct bin_attribute *bin_attr,
 			     char *buf, loff_t pos, size_t count)
 {
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index eb0c3fe..cae1b8c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -399,7 +399,7 @@
 			goto free_id;
 		}
 
-		pdesc->value_sd = sysfs_get_dirent(dev->kobj.sd, "value");
+		pdesc->value_sd = sysfs_get_dirent(dev->kobj.sd, NULL, "value");
 		if (!pdesc->value_sd) {
 			ret = -ENODEV;
 			goto free_id;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 305c590..88910e5 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,6 +9,7 @@
 	depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
 	select I2C
 	select I2C_ALGOBIT
+	select SLOW_WORK
 	help
 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
 	  introduced in XFree86 4.0. If you say Y here, you need to select
@@ -59,6 +60,7 @@
 	select FW_LOADER
         select DRM_KMS_HELPER
         select DRM_TTM
+	select POWER_SUPPLY
 	help
 	  Choose this option if you have an ATI Radeon graphics card.  There
 	  are both PCI and AGP versions.  You don't need to choose this to
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 932b5aa..3f46772 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -79,10 +79,9 @@
 	struct drm_device *dev = master->minor->dev;
 	DRM_DEBUG("%d\n", magic);
 
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
 		return -ENOMEM;
-	memset(entry, 0, sizeof(*entry));
 	entry->priv = priv;
 	entry->hash_item.key = (unsigned long)magic;
 	mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 61b9bcf..994d23b 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -34,6 +34,7 @@
 #include "drm.h"
 #include "drmP.h"
 #include "drm_crtc.h"
+#include "drm_edid.h"
 
 struct drm_prop_enum_list {
 	int type;
@@ -494,7 +495,6 @@
 	list_for_each_entry_safe(mode, t, &connector->user_modes, head)
 		drm_mode_remove(connector, mode);
 
-	kfree(connector->fb_helper_private);
 	mutex_lock(&dev->mode_config.mutex);
 	drm_mode_object_put(dev, &connector->base);
 	list_del(&connector->head);
@@ -858,7 +858,6 @@
 	mutex_init(&dev->mode_config.mutex);
 	mutex_init(&dev->mode_config.idr_mutex);
 	INIT_LIST_HEAD(&dev->mode_config.fb_list);
-	INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
 	INIT_LIST_HEAD(&dev->mode_config.crtc_list);
 	INIT_LIST_HEAD(&dev->mode_config.connector_list);
 	INIT_LIST_HEAD(&dev->mode_config.encoder_list);
@@ -2350,7 +2349,7 @@
 					    struct edid *edid)
 {
 	struct drm_device *dev = connector->dev;
-	int ret = 0;
+	int ret = 0, size;
 
 	if (connector->edid_blob_ptr)
 		drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -2362,7 +2361,9 @@
 		return ret;
 	}
 
-	connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
+	size = EDID_LENGTH * (1 + edid->extensions);
+	connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+							    size, edid);
 
 	ret = drm_connector_property_set_value(connector,
 					       dev->mode_config.edid_property,
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 51103aa..7644019 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -55,7 +55,7 @@
 }
 
 /**
- * drm_helper_probe_connector_modes - get complete set of display modes
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
  * @dev: DRM device
  * @maxX: max width for modes
  * @maxY: max height for modes
@@ -154,21 +154,6 @@
 }
 EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
 
-int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
-				      uint32_t maxY)
-{
-	struct drm_connector *connector;
-	int count = 0;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		count += drm_helper_probe_single_connector_modes(connector,
-								 maxX, maxY);
-	}
-
-	return count;
-}
-EXPORT_SYMBOL(drm_helper_probe_connector_modes);
-
 /**
  * drm_helper_encoder_in_use - check if a given encoder is in use
  * @encoder: encoder to check
@@ -263,302 +248,6 @@
 }
 EXPORT_SYMBOL(drm_helper_disable_unused_functions);
 
-static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
-{
-	struct drm_display_mode *mode;
-
-	list_for_each_entry(mode, &connector->modes, head) {
-		if (drm_mode_width(mode) > width ||
-		    drm_mode_height(mode) > height)
-			continue;
-		if (mode->type & DRM_MODE_TYPE_PREFERRED)
-			return mode;
-	}
-	return NULL;
-}
-
-static bool drm_has_cmdline_mode(struct drm_connector *connector)
-{
-	struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
-	struct drm_fb_helper_cmdline_mode *cmdline_mode;
-
-	if (!fb_help_conn)
-		return false;
-
-	cmdline_mode = &fb_help_conn->cmdline_mode;
-	return cmdline_mode->specified;
-}
-
-static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
-{
-	struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
-	struct drm_fb_helper_cmdline_mode *cmdline_mode;
-	struct drm_display_mode *mode = NULL;
-
-	if (!fb_help_conn)
-		return mode;
-
-	cmdline_mode = &fb_help_conn->cmdline_mode;
-	if (cmdline_mode->specified == false)
-		return mode;
-
-	/* attempt to find a matching mode in the list of modes
-	 *  we have gotten so far, if not add a CVT mode that conforms
-	 */
-	if (cmdline_mode->rb || cmdline_mode->margins)
-		goto create_mode;
-
-	list_for_each_entry(mode, &connector->modes, head) {
-		/* check width/height */
-		if (mode->hdisplay != cmdline_mode->xres ||
-		    mode->vdisplay != cmdline_mode->yres)
-			continue;
-
-		if (cmdline_mode->refresh_specified) {
-			if (mode->vrefresh != cmdline_mode->refresh)
-				continue;
-		}
-
-		if (cmdline_mode->interlace) {
-			if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
-				continue;
-		}
-		return mode;
-	}
-
-create_mode:
-	mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
-			    cmdline_mode->yres,
-			    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
-			    cmdline_mode->rb, cmdline_mode->interlace,
-			    cmdline_mode->margins);
-	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
-	list_add(&mode->head, &connector->modes);
-	return mode;
-}
-
-static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
-{
-	bool enable;
-
-	if (strict) {
-		enable = connector->status == connector_status_connected;
-	} else {
-		enable = connector->status != connector_status_disconnected;
-	}
-	return enable;
-}
-
-static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
-{
-	bool any_enabled = false;
-	struct drm_connector *connector;
-	int i = 0;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		enabled[i] = drm_connector_enabled(connector, true);
-		DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
-			  enabled[i] ? "yes" : "no");
-		any_enabled |= enabled[i];
-		i++;
-	}
-
-	if (any_enabled)
-		return;
-
-	i = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		enabled[i] = drm_connector_enabled(connector, false);
-		i++;
-	}
-}
-
-static bool drm_target_preferred(struct drm_device *dev,
-				 struct drm_display_mode **modes,
-				 bool *enabled, int width, int height)
-{
-	struct drm_connector *connector;
-	int i = 0;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-
-		if (enabled[i] == false) {
-			i++;
-			continue;
-		}
-
-		DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
-			      connector->base.id);
-
-		/* got for command line mode first */
-		modes[i] = drm_pick_cmdline_mode(connector, width, height);
-		if (!modes[i]) {
-			DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
-				      connector->base.id);
-			modes[i] = drm_has_preferred_mode(connector, width, height);
-		}
-		/* No preferred modes, pick one off the list */
-		if (!modes[i] && !list_empty(&connector->modes)) {
-			list_for_each_entry(modes[i], &connector->modes, head)
-				break;
-		}
-		DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
-			  "none");
-		i++;
-	}
-	return true;
-}
-
-static int drm_pick_crtcs(struct drm_device *dev,
-			  struct drm_crtc **best_crtcs,
-			  struct drm_display_mode **modes,
-			  int n, int width, int height)
-{
-	int c, o;
-	struct drm_connector *connector;
-	struct drm_connector_helper_funcs *connector_funcs;
-	struct drm_encoder *encoder;
-	struct drm_crtc *best_crtc;
-	int my_score, best_score, score;
-	struct drm_crtc **crtcs, *crtc;
-
-	if (n == dev->mode_config.num_connector)
-		return 0;
-	c = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (c == n)
-			break;
-		c++;
-	}
-
-	best_crtcs[n] = NULL;
-	best_crtc = NULL;
-	best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
-	if (modes[n] == NULL)
-		return best_score;
-
-	crtcs = kmalloc(dev->mode_config.num_connector *
-			sizeof(struct drm_crtc *), GFP_KERNEL);
-	if (!crtcs)
-		return best_score;
-
-	my_score = 1;
-	if (connector->status == connector_status_connected)
-		my_score++;
-	if (drm_has_cmdline_mode(connector))
-		my_score++;
-	if (drm_has_preferred_mode(connector, width, height))
-		my_score++;
-
-	connector_funcs = connector->helper_private;
-	encoder = connector_funcs->best_encoder(connector);
-	if (!encoder)
-		goto out;
-
-	connector->encoder = encoder;
-
-	/* select a crtc for this connector and then attempt to configure
-	   remaining connectors */
-	c = 0;
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
-		if ((encoder->possible_crtcs & (1 << c)) == 0) {
-			c++;
-			continue;
-		}
-
-		for (o = 0; o < n; o++)
-			if (best_crtcs[o] == crtc)
-				break;
-
-		if (o < n) {
-			/* ignore cloning for now */
-			c++;
-			continue;
-		}
-
-		crtcs[n] = crtc;
-		memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
-		score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
-						  width, height);
-		if (score > best_score) {
-			best_crtc = crtc;
-			best_score = score;
-			memcpy(best_crtcs, crtcs,
-			       dev->mode_config.num_connector *
-			       sizeof(struct drm_crtc *));
-		}
-		c++;
-	}
-out:
-	kfree(crtcs);
-	return best_score;
-}
-
-static void drm_setup_crtcs(struct drm_device *dev)
-{
-	struct drm_crtc **crtcs;
-	struct drm_display_mode **modes;
-	struct drm_encoder *encoder;
-	struct drm_connector *connector;
-	bool *enabled;
-	int width, height;
-	int i, ret;
-
-	DRM_DEBUG_KMS("\n");
-
-	width = dev->mode_config.max_width;
-	height = dev->mode_config.max_height;
-
-	/* clean out all the encoder/crtc combos */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		encoder->crtc = NULL;
-	}
-
-	crtcs = kcalloc(dev->mode_config.num_connector,
-			sizeof(struct drm_crtc *), GFP_KERNEL);
-	modes = kcalloc(dev->mode_config.num_connector,
-			sizeof(struct drm_display_mode *), GFP_KERNEL);
-	enabled = kcalloc(dev->mode_config.num_connector,
-			  sizeof(bool), GFP_KERNEL);
-
-	drm_enable_connectors(dev, enabled);
-
-	ret = drm_target_preferred(dev, modes, enabled, width, height);
-	if (!ret)
-		DRM_ERROR("Unable to find initial modes\n");
-
-	DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
-
-	drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
-
-	i = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct drm_display_mode *mode = modes[i];
-		struct drm_crtc *crtc = crtcs[i];
-
-		if (connector->encoder == NULL) {
-			i++;
-			continue;
-		}
-
-		if (mode && crtc) {
-			DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
-				  mode->name, crtc->base.id);
-			crtc->desired_mode = mode;
-			connector->encoder->crtc = crtc;
-		} else {
-			connector->encoder->crtc = NULL;
-			connector->encoder = NULL;
-		}
-		i++;
-	}
-
-	kfree(crtcs);
-	kfree(modes);
-	kfree(enabled);
-}
-
 /**
  * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
  * @encoder: encoder to test
@@ -936,10 +625,6 @@
 				ret = -EINVAL;
 				goto fail;
 			}
-			/* TODO are these needed? */
-			set->crtc->desired_x = set->x;
-			set->crtc->desired_y = set->y;
-			set->crtc->desired_mode = set->mode;
 		}
 		drm_helper_disable_unused_functions(dev);
 	} else if (fb_changed) {
@@ -984,63 +669,6 @@
 }
 EXPORT_SYMBOL(drm_crtc_helper_set_config);
 
-bool drm_helper_plugged_event(struct drm_device *dev)
-{
-	DRM_DEBUG_KMS("\n");
-
-	drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
-					 dev->mode_config.max_height);
-
-	drm_setup_crtcs(dev);
-
-	/* alert the driver fb layer */
-	dev->mode_config.funcs->fb_changed(dev);
-
-	/* FIXME: send hotplug event */
-	return true;
-}
-/**
- * drm_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
- *
- * LOCKING:
- * Called at init time, must take mode config lock.
- *
- * Scan the CRTCs and connectors and try to put together an initial setup.
- * At the moment, this is a cloned configuration across all heads with
- * a new framebuffer object as the backing store.
- *
- * RETURNS:
- * Zero if everything went ok, nonzero otherwise.
- */
-bool drm_helper_initial_config(struct drm_device *dev)
-{
-	int count = 0;
-
-	/* disable all the possible outputs/crtcs before entering KMS mode */
-	drm_helper_disable_unused_functions(dev);
-
-	drm_fb_helper_parse_command_line(dev);
-
-	count = drm_helper_probe_connector_modes(dev,
-						 dev->mode_config.max_width,
-						 dev->mode_config.max_height);
-
-	/*
-	 * we shouldn't end up with no modes here.
-	 */
-	if (count == 0)
-		printk(KERN_INFO "No connectors reported connected with modes\n");
-
-	drm_setup_crtcs(dev);
-
-	/* alert the driver fb layer */
-	dev->mode_config.funcs->fb_changed(dev);
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_helper_initial_config);
-
 static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
 {
 	int dpms = DRM_MODE_DPMS_OFF;
@@ -1123,27 +751,6 @@
 }
 EXPORT_SYMBOL(drm_helper_connector_dpms);
 
-/**
- * drm_hotplug_stage_two
- * @dev DRM device
- * @connector hotpluged connector
- *
- * LOCKING.
- * Caller must hold mode config lock, function might grab struct lock.
- *
- * Stage two of a hotplug.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_helper_hotplug_stage_two(struct drm_device *dev)
-{
-	drm_helper_plugged_event(dev);
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
-
 int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
 				   struct drm_mode_fb_cmd *mode_cmd)
 {
@@ -1200,3 +807,98 @@
 	return 0;
 }
 EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+static struct slow_work_ops output_poll_ops;
+
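+/* poll connectors for hotplug changes every 10 seconds */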
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
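+/* periodic worker: re-detect pollable connectors and send a hotplug uevent on change */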
+static void output_poll_execute(struct slow_work *work)
+{
+	struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
+	struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work);
+	struct drm_connector *connector;
+	enum drm_connector_status old_status, status;
+	bool repoll = false, changed = false;
+	int ret;
+
+	mutex_lock(&dev->mode_config.mutex);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+		/* if this is neither HPD nor polled, don't check it -
+		   TV out for instance */
+		if (!connector->polled)
+			continue;
+
+		else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
+			repoll = true;
+
+		old_status = connector->status;
+		/* if we are connected and don't want to poll for disconnect
+		   skip it */
+		if (old_status == connector_status_connected &&
+		    !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
+		    !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+			continue;
+
+		status = connector->funcs->detect(connector);
+		if (old_status != status)
+			changed = true;
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	if (changed) {
+		/* send a uevent + call fbdev */
+		drm_sysfs_hotplug_event(dev);
+		if (dev->mode_config.funcs->output_poll_changed)
+			dev->mode_config.funcs->output_poll_changed(dev);
+	}
+
+	if (repoll) {
+		ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD);
+		if (ret)
+			DRM_ERROR("delayed enqueue failed %d\n", ret);
+	}
+}
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
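+/* register the slow-work user and start periodic polling if any connector asks for it */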
+{
+	struct drm_connector *connector;
+	bool poll = false;
+	int ret;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->polled)
+			poll = true;
+	}
+	slow_work_register_user(THIS_MODULE);
+	delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
+			       &output_poll_ops);
+
+	if (poll) {
+		ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
+		if (ret)
+			DRM_ERROR("delayed enqueue failed %d\n", ret);
+	}
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_init);
+
+void drm_kms_helper_poll_fini(struct drm_device *dev)
+{
+	delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+	slow_work_unregister_user(THIS_MODULE);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+
+void drm_helper_hpd_irq_event(struct drm_device *dev)
+{
+	if (!dev->mode_config.poll_enabled)
+		return;
+	delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+	/* schedule a slow work asap */
+	delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0);
+}
+EXPORT_SYMBOL(drm_helper_hpd_irq_event);
+
+static struct slow_work_ops output_poll_ops = {
+	.execute = output_poll_execute,
+};
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 13f1537..252cbd7 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -47,12 +47,10 @@
 {
 	int i;
 
-	dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL);
+	dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
 	if (!dev->dma)
 		return -ENOMEM;
 
-	memset(dev->dma, 0, sizeof(*dev->dma));
-
 	for (i = 0; i <= DRM_MAX_ORDER; i++)
 		memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
 
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 18f41d7..f569ae8 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2,6 +2,7 @@
  * Copyright (c) 2006 Luc Verhaegen (quirks list)
  * Copyright (c) 2007-2008 Intel Corporation
  *   Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
  *
  * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
  * FB layer.
@@ -33,10 +34,9 @@
 #include "drmP.h"
 #include "drm_edid.h"
 
-/*
- * TODO:
- *   - support EDID 1.4 (incl. CE blocks)
- */
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
 
 /*
  * EDID blocks out in the wild have a variety of bugs, try to collect
@@ -65,7 +65,8 @@
 
 #define LEVEL_DMT	0
 #define LEVEL_GTF	1
-#define LEVEL_CVT	2
+#define LEVEL_GTF2	2
+#define LEVEL_CVT	3
 
 static struct edid_quirk {
 	char *vendor;
@@ -109,36 +110,38 @@
 	{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
 };
 
+/*** DDC fetch and block validation ***/
 
-/* Valid EDID header has these bytes */
 static const u8 edid_header[] = {
 	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
 };
 
-/**
- * drm_edid_is_valid - sanity check EDID data
- * @edid: EDID data
- *
- * Sanity check the EDID block by looking at the header, the version number
- * and the checksum.  Return 0 if the EDID doesn't check out, or 1 if it's
- * valid.
+/*
+ * Sanity check the EDID block (base or extension).  Return 0 if the block
+ * doesn't check out, or 1 if it's valid.
  */
-bool drm_edid_is_valid(struct edid *edid)
+static bool
+drm_edid_block_valid(u8 *raw_edid)
 {
-	int i, score = 0;
+	int i;
 	u8 csum = 0;
-	u8 *raw_edid = (u8 *)edid;
+	struct edid *edid = (struct edid *)raw_edid;
 
-	for (i = 0; i < sizeof(edid_header); i++)
-		if (raw_edid[i] == edid_header[i])
-			score++;
+	if (raw_edid[0] == 0x00) {
+		int score = 0;
 
-	if (score == 8) ;
-	else if (score >= 6) {
-		DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
-		memcpy(raw_edid, edid_header, sizeof(edid_header));
-	} else
-		goto bad;
+		for (i = 0; i < sizeof(edid_header); i++)
+			if (raw_edid[i] == edid_header[i])
+				score++;
+
+		if (score == 8) ;
+		else if (score >= 6) {
+			DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+			memcpy(raw_edid, edid_header, sizeof(edid_header));
+		} else {
+			goto bad;
+		}
+	}
 
 	for (i = 0; i < EDID_LENGTH; i++)
 		csum += raw_edid[i];
@@ -147,13 +150,21 @@
 		goto bad;
 	}
 
-	if (edid->version != 1) {
-		DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
-		goto bad;
-	}
+	/* per-block-type checks */
+	switch (raw_edid[0]) {
+	case 0: /* base */
+		if (edid->version != 1) {
+			DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+			goto bad;
+		}
 
-	if (edid->revision > 4)
-		DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+		if (edid->revision > 4)
+			DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+		break;
+
+	default:
+		break;
+	}
 
 	return 1;
 
@@ -165,8 +176,158 @@
 	}
 	return 0;
 }
+
+/**
+ * drm_edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
+ */
+bool drm_edid_is_valid(struct edid *edid)
+{
+	int i;
+	u8 *raw = (u8 *)edid;
+
+	if (!edid)
+		return false;
+
+	for (i = 0; i <= edid->extensions; i++)
+		if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
+			return false;
+
+	return true;
+}
 EXPORT_SYMBOL(drm_edid_is_valid);
 
+#define DDC_ADDR 0x50
+#define DDC_SEGMENT_ADDR 0x30
+/**
+ * Get EDID information via I2C.
+ *
+ * \param adapter : i2c device adaptor
+ * \param buf     : EDID data buffer to be filled
+ * \param len     : EDID data buffer length
+ * \return 0 on success or -1 on failure.
+ *
+ * Try to fetch EDID information by calling i2c driver function.
+ */
+static int
+drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+		      int block, int len)
+{
+	unsigned char start = block * EDID_LENGTH;
+	struct i2c_msg msgs[] = {
+		{
+			.addr	= DDC_ADDR,
+			.flags	= 0,
+			.len	= 1,
+			.buf	= &start,
+		}, {
+			.addr	= DDC_ADDR,
+			.flags	= I2C_M_RD,
+			.len	= len,
+			.buf	= buf + start,
+		}
+	};
+
+	if (i2c_transfer(adapter, msgs, 2) == 2)
+		return 0;
+
+	return -1;
+}
+
+static u8 *
+drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+	int i, j = 0;
+	u8 *block, *new;
+
+	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+		return NULL;
+
+	/* base block fetch */
+	for (i = 0; i < 4; i++) {
+		if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+			goto out;
+		if (drm_edid_block_valid(block))
+			break;
+	}
+	if (i == 4)
+		goto carp;
+
+	/* if there are no extensions, we're done */
+	if (block[0x7e] == 0)
+		return block;
+
+	new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+	if (!new)
+		goto out;
+	block = new;
+
+	for (j = 1; j <= block[0x7e]; j++) {
+		for (i = 0; i < 4; i++) {
+			if (drm_do_probe_ddc_edid(adapter, block, j,
+						  EDID_LENGTH))
+				goto out;
+			if (drm_edid_block_valid(block + j * EDID_LENGTH))
+				break;
+		}
+		if (i == 4)
+			goto carp;
+	}
+
+	return block;
+
+carp:
+	dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n",
+		 drm_get_connector_name(connector), j);
+
+out:
+	kfree(block);
+	return NULL;
+}
+
+/**
+ * Probe DDC presence.
+ *
+ * \param adapter : i2c device adaptor
+ * \return 1 on success
+ */
+static bool
+drm_probe_ddc(struct i2c_adapter *adapter)
+{
+	unsigned char out;
+
+	return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given i2c channel to grab EDID data if possible.  If found,
+ * attach it to the connector.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+			  struct i2c_adapter *adapter)
+{
+	struct edid *edid = NULL;
+
+	if (drm_probe_ddc(adapter))
+		edid = (struct edid *)drm_do_get_edid(connector, adapter);
+
+	connector->display_info.raw_edid = (char *)edid;
+
+	return edid;
+
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+/*** EDID parsing ***/
+
 /**
  * edid_vendor - match a string against EDID's obfuscated vendor field
  * @edid: EDID to match
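A driver's connector ->get_modes hook is the usual consumer of the new drm_get_edid() path above; a minimal sketch of such a caller, where example_get_modes() is a hypothetical name and drm_add_edid_modes() is assumed from elsewhere in the DRM core rather than from this hunk:

	static int example_get_modes(struct drm_connector *connector,
				     struct i2c_adapter *ddc)
	{
		struct edid *edid;
		int count = 0;

		/* probes DDC, fetches base block plus extensions, validates each */
		edid = drm_get_edid(connector, ddc);
		if (!edid)
			return 0;

		count = drm_add_edid_modes(connector, edid);	/* assumed helper */
		kfree(edid);
		return count;
	}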
@@ -335,7 +496,7 @@
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 	/* 1024x768@85Hz */
 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
-		   1072, 1376, 0, 768, 769, 772, 808, 0,
+		   1168, 1376, 0, 768, 769, 772, 808, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 	/* 1152x864@75Hz */
 	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
@@ -426,7 +587,7 @@
 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 	/* 1600x1200@75Hz */
-	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664,
+	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 	/* 1600x1200@85Hz */
@@ -497,8 +658,8 @@
 static const int drm_num_dmt_modes =
 	sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
 
-static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
-			int hsize, int vsize, int fresh)
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+					   int hsize, int vsize, int fresh)
 {
 	int i;
 	struct drm_display_mode *ptr, *mode;
@@ -516,6 +677,111 @@
 	}
 	return mode;
 }
+EXPORT_SYMBOL(drm_mode_find_dmt);
+
+typedef void detailed_cb(struct detailed_timing *timing, void *closure);
+
+static void
+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
+{
+	int i;
+	struct edid *edid = (struct edid *)raw_edid;
+
+	if (edid == NULL)
+		return;
+
+	for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
+		cb(&(edid->detailed_timings[i]), closure);
+
+	/* XXX extension block walk */
+}
+
+static void
+is_rb(struct detailed_timing *t, void *data)
+{
+	u8 *r = (u8 *)t;
+	if (r[3] == EDID_DETAIL_MONITOR_RANGE)
+		if (r[15] & 0x10)
+			*(bool *)data = true;
+}
+
+/* EDID 1.4 defines this explicitly.  For EDID 1.3, we guess, badly. */
+static bool
+drm_monitor_supports_rb(struct edid *edid)
+{
+	if (edid->revision >= 4) {
+		bool ret = false;
+		drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+		return ret;
+	}
+
+	return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+}
+
+static void
+find_gtf2(struct detailed_timing *t, void *data)
+{
+	u8 *r = (u8 *)t;
+	if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+		*(u8 **)data = r;
+}
+
+/* Secondary GTF curve kicks in above some break frequency */
+static int
+drm_gtf2_hbreak(struct edid *edid)
+{
+	u8 *r = NULL;
+	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+	return r ? (r[12] * 2) : 0;
+}
+
+static int
+drm_gtf2_2c(struct edid *edid)
+{
+	u8 *r = NULL;
+	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+	return r ? r[13] : 0;
+}
+
+static int
+drm_gtf2_m(struct edid *edid)
+{
+	u8 *r = NULL;
+	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+	return r ? (r[15] << 8) + r[14] : 0;
+}
+
+static int
+drm_gtf2_k(struct edid *edid)
+{
+	u8 *r = NULL;
+	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+	return r ? r[16] : 0;
+}
+
+static int
+drm_gtf2_2j(struct edid *edid)
+{
+	u8 *r = NULL;
+	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+	return r ? r[17] : 0;
+}
+
+/**
+ * standard_timing_level - get std. timing level (CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+	if (edid->revision >= 2) {
+		if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+			return LEVEL_CVT;
+		if (drm_gtf2_hbreak(edid))
+			return LEVEL_GTF2;
+		return LEVEL_GTF;
+	}
+	return LEVEL_DMT;
+}
 
 /*
  * 0 is reserved.  The spec says 0x01 fill for unused timings.  Some old
@@ -536,22 +802,20 @@
  *
  * Take the standard timing params (in this case width, aspect, and refresh)
  * and convert them into a real mode using CVT/GTF/DMT.
- *
- * Punts for now, but should eventually use the FB layer's CVT based mode
- * generation code.
  */
-struct drm_display_mode *drm_mode_std(struct drm_device *dev,
-				      struct std_timing *t,
-				      int revision,
-				      int timing_level)
+static struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+	     struct std_timing *t, int revision)
 {
-	struct drm_display_mode *mode;
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *m, *mode = NULL;
 	int hsize, vsize;
 	int vrefresh_rate;
 	unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
 		>> EDID_TIMING_ASPECT_SHIFT;
 	unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
 		>> EDID_TIMING_VFREQ_SHIFT;
+	int timing_level = standard_timing_level(edid);
 
 	if (bad_std_timing(t->hsize, t->vfreq_aspect))
 		return NULL;
@@ -572,18 +836,38 @@
 		vsize = (hsize * 4) / 5;
 	else
 		vsize = (hsize * 9) / 16;
-	/* HDTV hack */
-	if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
-		mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+
+	/* HDTV hack, part 1 */
+	if (vrefresh_rate == 60 &&
+	    ((hsize == 1360 && vsize == 765) ||
+	     (hsize == 1368 && vsize == 769))) {
+		hsize = 1366;
+		vsize = 768;
+	}
+
+	/*
+	 * If this connector already has a mode for this size and refresh
+	 * rate (because it came from detailed or CVT info), use that
+	 * instead.  This way we don't have to guess at interlace or
+	 * reduced blanking.
+	 */
+	list_for_each_entry(m, &connector->probed_modes, head)
+		if (m->hdisplay == hsize && m->vdisplay == vsize &&
+		    drm_mode_vrefresh(m) == vrefresh_rate)
+			return NULL;
+
+	/* HDTV hack, part 2 */
+	if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+		mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
 				    false);
 		mode->hdisplay = 1366;
 		mode->vsync_start = mode->vsync_start - 1;
 		mode->vsync_end = mode->vsync_end - 1;
 		return mode;
 	}
-	mode = NULL;
+
 	/* check whether it can be found in default mode table */
-	mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate);
+	mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
 	if (mode)
 		return mode;
 
@@ -593,6 +877,23 @@
 	case LEVEL_GTF:
 		mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
 		break;
+	case LEVEL_GTF2:
+		/*
+		 * This is potentially wrong if there's ever a monitor with
+		 * more than one ranges section, each claiming a different
+		 * more than one range section, each claiming a different
+		 */
+		mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+		if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+			kfree(mode);
+			mode = drm_gtf_mode_complex(dev, hsize, vsize,
+						    vrefresh_rate, 0, 0,
+						    drm_gtf2_m(edid),
+						    drm_gtf2_2c(edid),
+						    drm_gtf2_k(edid),
+						    drm_gtf2_2j(edid));
+		}
+		break;
 	case LEVEL_CVT:
 		mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
 				    false);
@@ -716,10 +1017,10 @@
 	if (mode->vsync_end > mode->vtotal)
 		mode->vtotal = mode->vsync_end + 1;
 
-	drm_mode_set_name(mode);
-
 	drm_mode_do_interlace_quirk(mode, pt);
 
+	drm_mode_set_name(mode);
+
 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
 	}
@@ -802,10 +1103,6 @@
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
 };
 
-#define EDID_EST_TIMINGS 16
-#define EDID_STD_TIMINGS 8
-#define EDID_DETAILED_TIMINGS 4
-
 /**
  * add_established_modes - get est. modes from EDID and add them
  * @edid: EDID block to scan
@@ -833,19 +1130,6 @@
 
 	return modes;
 }
-/**
- * stanard_timing_level - get std. timing level(CVT/GTF/DMT)
- * @edid: EDID block to scan
- */
-static int standard_timing_level(struct edid *edid)
-{
-	if (edid->revision >= 2) {
-		if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
-			return LEVEL_CVT;
-		return LEVEL_GTF;
-	}
-	return LEVEL_DMT;
-}
 
 /**
  * add_standard_modes - get std. modes from EDID and add them
@@ -856,22 +1140,14 @@
  */
 static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
 {
-	struct drm_device *dev = connector->dev;
 	int i, modes = 0;
-	int timing_level;
-
-	timing_level = standard_timing_level(edid);
 
 	for (i = 0; i < EDID_STD_TIMINGS; i++) {
-		struct std_timing *t = &edid->standard_timings[i];
 		struct drm_display_mode *newmode;
 
-		/* If std timings bytes are 1, 1 it's empty */
-		if (t->hsize == 1 && t->vfreq_aspect == 1)
-			continue;
-
-		newmode = drm_mode_std(dev, &edid->standard_timings[i],
-				       edid->revision, timing_level);
+		newmode = drm_mode_std(connector, edid,
+				       &edid->standard_timings[i],
+				       edid->revision);
 		if (newmode) {
 			drm_mode_probed_add(connector, newmode);
 			modes++;
@@ -881,36 +1157,86 @@
 	return modes;
 }
 
-/*
- * XXX fix this for:
- * - GTF secondary curve formula
- * - EDID 1.4 range offsets
- * - CVT extended bits
- */
 static bool
-mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+mode_is_rb(struct drm_display_mode *mode)
 {
-	struct detailed_data_monitor_range *range;
-	int hsync, vrefresh;
+	return (mode->htotal - mode->hdisplay == 160) &&
+	       (mode->hsync_end - mode->hdisplay == 80) &&
+	       (mode->hsync_end - mode->hsync_start == 32) &&
+	       (mode->vsync_start - mode->vdisplay == 3);
+}
 
-	range = &timing->data.other_data.data.range;
+static bool
+mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+	int hsync, hmin, hmax;
 
+	hmin = t[7];
+	if (edid->revision >= 4)
+	    hmin += ((t[4] & 0x04) ? 255 : 0);
+	hmax = t[8];
+	if (edid->revision >= 4)
+	    hmax += ((t[4] & 0x08) ? 255 : 0);
 	hsync = drm_mode_hsync(mode);
-	vrefresh = drm_mode_vrefresh(mode);
 
-	if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+	return (hsync <= hmax && hsync >= hmin);
+}
+
+static bool
+mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+	int vsync, vmin, vmax;
+
+	vmin = t[5];
+	if (edid->revision >= 4)
+	    vmin += ((t[4] & 0x01) ? 255 : 0);
+	vmax = t[6];
+	if (edid->revision >= 4)
+	    vmax += ((t[4] & 0x02) ? 255 : 0);
+	vsync = drm_mode_vrefresh(mode);
+
+	return (vsync <= vmax && vsync >= vmin);
+}
+
+static u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+	/* unspecified */
+	if (t[9] == 0 || t[9] == 255)
+		return 0;
+
+	/* 1.4 with CVT support gives us real precision, yay */
+	if (edid->revision >= 4 && t[10] == 0x04)
+		return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+	/* 1.3 is pathetic, so fuzz up a bit */
+	return t[9] * 10000 + 5001;
+}
+
+static bool
+mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+	      struct detailed_timing *timing)
+{
+	u32 max_clock;
+	u8 *t = (u8 *)timing;
+
+	if (!mode_in_hsync_range(mode, edid, t))
 		return false;
 
-	if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+	if (!mode_in_vsync_range(mode, edid, t))
 		return false;
 
-	if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
-		/* be forgiving since it's in units of 10MHz */
-		int max_clock = range->pixel_clock_mhz * 10 + 9;
-		max_clock *= 1000;
+	if ((max_clock = range_pixel_clock(edid, t)))
 		if (mode->clock > max_clock)
 			return false;
-	}
+
+	/* 1.4 max horizontal check */
+	if (edid->revision >= 4 && t[10] == 0x04)
+		if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
+			return false;
+
+	if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+		return false;
 
 	return true;
 }
@@ -919,15 +1245,16 @@
  * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
  * need to account for them.
  */
-static int drm_gtf_modes_for_range(struct drm_connector *connector,
-				   struct detailed_timing *timing)
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+			struct detailed_timing *timing)
 {
 	int i, modes = 0;
 	struct drm_display_mode *newmode;
 	struct drm_device *dev = connector->dev;
 
 	for (i = 0; i < drm_num_dmt_modes; i++) {
-		if (mode_in_range(drm_dmt_modes + i, timing)) {
+		if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
 			newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
 			if (newmode) {
 				drm_mode_probed_add(connector, newmode);
@@ -988,13 +1315,100 @@
 	return modes;
 }
 
+static const struct {
+	short w;
+	short h;
+	short r;
+	short rb;
+} est3_modes[] = {
+	/* byte 6 */
+	{ 640, 350, 85, 0 },
+	{ 640, 400, 85, 0 },
+	{ 720, 400, 85, 0 },
+	{ 640, 480, 85, 0 },
+	{ 848, 480, 60, 0 },
+	{ 800, 600, 85, 0 },
+	{ 1024, 768, 85, 0 },
+	{ 1152, 864, 75, 0 },
+	/* byte 7 */
+	{ 1280, 768, 60, 1 },
+	{ 1280, 768, 60, 0 },
+	{ 1280, 768, 75, 0 },
+	{ 1280, 768, 85, 0 },
+	{ 1280, 960, 60, 0 },
+	{ 1280, 960, 85, 0 },
+	{ 1280, 1024, 60, 0 },
+	{ 1280, 1024, 85, 0 },
+	/* byte 8 */
+	{ 1360, 768, 60, 0 },
+	{ 1440, 900, 60, 1 },
+	{ 1440, 900, 60, 0 },
+	{ 1440, 900, 75, 0 },
+	{ 1440, 900, 85, 0 },
+	{ 1400, 1050, 60, 1 },
+	{ 1400, 1050, 60, 0 },
+	{ 1400, 1050, 75, 0 },
+	/* byte 9 */
+	{ 1400, 1050, 85, 0 },
+	{ 1680, 1050, 60, 1 },
+	{ 1680, 1050, 60, 0 },
+	{ 1680, 1050, 75, 0 },
+	{ 1680, 1050, 85, 0 },
+	{ 1600, 1200, 60, 0 },
+	{ 1600, 1200, 65, 0 },
+	{ 1600, 1200, 70, 0 },
+	/* byte 10 */
+	{ 1600, 1200, 75, 0 },
+	{ 1600, 1200, 85, 0 },
+	{ 1792, 1344, 60, 0 },
+	{ 1792, 1344, 85, 0 },
+	{ 1856, 1392, 60, 0 },
+	{ 1856, 1392, 75, 0 },
+	{ 1920, 1200, 60, 1 },
+	{ 1920, 1200, 60, 0 },
+	/* byte 11 */
+	{ 1920, 1200, 75, 0 },
+	{ 1920, 1200, 85, 0 },
+	{ 1920, 1440, 60, 0 },
+	{ 1920, 1440, 75, 0 },
+};
+static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+
+static int
+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
+{
+	int i, j, m, modes = 0;
+	struct drm_display_mode *mode;
+	u8 *est = ((u8 *)timing) + 5;
+
+	for (i = 0; i < 6; i++) {
+		for (j = 7; j > 0; j--) {
+			m = (i * 8) + (7 - j);
+			if (m >= num_est3_modes)
+				break;
+			if (est[i] & (1 << j)) {
+				mode = drm_mode_find_dmt(connector->dev,
+							 est3_modes[m].w,
+							 est3_modes[m].h,
+							 est3_modes[m].r
+							 /*, est3_modes[m].rb */);
+				if (mode) {
+					drm_mode_probed_add(connector, mode);
+					modes++;
+				}
+			}
+		}
+	}
+
+	return modes;
+}
+
 static int add_detailed_modes(struct drm_connector *connector,
 			      struct detailed_timing *timing,
 			      struct edid *edid, u32 quirks, int preferred)
 {
 	int i, modes = 0;
 	struct detailed_non_pixel *data = &timing->data.other_data;
-	int timing_level = standard_timing_level(edid);
 	int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
 	struct drm_display_mode *newmode;
 	struct drm_device *dev = connector->dev;
@@ -1015,7 +1429,8 @@
 	switch (data->type) {
 	case EDID_DETAIL_MONITOR_RANGE:
 		if (gtf)
-			modes += drm_gtf_modes_for_range(connector, timing);
+			modes += drm_gtf_modes_for_range(connector, edid,
+							 timing);
 		break;
 	case EDID_DETAIL_STD_MODES:
 		/* Six modes per detailed section */
@@ -1024,8 +1439,8 @@
 			struct drm_display_mode *newmode;
 
 			std = &data->data.timings[i];
-			newmode = drm_mode_std(dev, std, edid->revision,
-					       timing_level);
+			newmode = drm_mode_std(connector, edid, std,
+					       edid->revision);
 			if (newmode) {
 				drm_mode_probed_add(connector, newmode);
 				modes++;
@@ -1035,6 +1450,9 @@
 	case EDID_DETAIL_CVT_3BYTE:
 		modes += drm_cvt_modes(connector, timing);
 		break;
+	case EDID_DETAIL_EST_TIMINGS:
+		modes += drm_est3_modes(connector, timing);
+		break;
 	default:
 		break;
 	}
@@ -1058,7 +1476,10 @@
 
 	for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
 		struct detailed_timing *timing = &edid->detailed_timings[i];
-		int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+		int preferred = (i == 0);
+
+		if (preferred && edid->version == 1 && edid->revision < 4)
+			preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
 
 		/* In 1.0, only timings are allowed */
 		if (!timing->pixel_clock && edid->version == 1 &&
@@ -1088,39 +1509,22 @@
 	int i, modes = 0;
 	char *edid_ext = NULL;
 	struct detailed_timing *timing;
-	int edid_ext_num;
 	int start_offset, end_offset;
-	int timing_level;
 
-	if (edid->version == 1 && edid->revision < 3) {
-		/* If the EDID version is less than 1.3, there is no
-		 * extension EDID.
-		 */
+	if (edid->version == 1 && edid->revision < 3)
 		return 0;
-	}
-	if (!edid->extensions) {
-		/* if there is no extension EDID, it is unnecessary to
-		 * parse the E-EDID to get detailed info
-		 */
+	if (!edid->extensions)
 		return 0;
-	}
-
-	/* Chose real EDID extension number */
-	edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
-		DRM_MAX_EDID_EXT_NUM : edid->extensions;
 
 	/* Find CEA extension */
-	for (i = 0; i < edid_ext_num; i++) {
+	for (i = 0; i < edid->extensions; i++) {
 		edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
-		/* This block is CEA extension */
 		if (edid_ext[0] == 0x02)
 			break;
 	}
 
-	if (i == edid_ext_num) {
-		/* if there is no additional timing EDID block, return */
+	if (i == edid->extensions)
 		return 0;
-	}
 
 	/* Get the start offset of detailed timing block */
 	start_offset = edid_ext[2];
@@ -1132,7 +1536,6 @@
 		return 0;
 	}
 
-	timing_level = standard_timing_level(edid);
 	end_offset = EDID_LENGTH;
 	end_offset -= sizeof(struct detailed_timing);
 	for (i = start_offset; i < end_offset;
@@ -1144,123 +1547,6 @@
 	return modes;
 }
 
-#define DDC_ADDR 0x50
-/**
- * Get EDID information via I2C.
- *
- * \param adapter : i2c device adaptor
- * \param buf     : EDID data buffer to be filled
- * \param len     : EDID data buffer length
- * \return 0 on success or -1 on failure.
- *
- * Try to fetch EDID information by calling i2c driver function.
- */
-int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
-			  unsigned char *buf, int len)
-{
-	unsigned char start = 0x0;
-	struct i2c_msg msgs[] = {
-		{
-			.addr	= DDC_ADDR,
-			.flags	= 0,
-			.len	= 1,
-			.buf	= &start,
-		}, {
-			.addr	= DDC_ADDR,
-			.flags	= I2C_M_RD,
-			.len	= len,
-			.buf	= buf,
-		}
-	};
-
-	if (i2c_transfer(adapter, msgs, 2) == 2)
-		return 0;
-
-	return -1;
-}
-EXPORT_SYMBOL(drm_do_probe_ddc_edid);
-
-static int drm_ddc_read_edid(struct drm_connector *connector,
-			     struct i2c_adapter *adapter,
-			     char *buf, int len)
-{
-	int i;
-
-	for (i = 0; i < 4; i++) {
-		if (drm_do_probe_ddc_edid(adapter, buf, len))
-			return -1;
-		if (drm_edid_is_valid((struct edid *)buf))
-			return 0;
-	}
-
-	/* repeated checksum failures; warn, but carry on */
-	dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
-		 drm_get_connector_name(connector));
-	return -1;
-}
-
-/**
- * drm_get_edid - get EDID data, if available
- * @connector: connector we're probing
- * @adapter: i2c adapter to use for DDC
- *
- * Poke the given connector's i2c channel to grab EDID data if possible.
- *
- * Return edid data or NULL if we couldn't find any.
- */
-struct edid *drm_get_edid(struct drm_connector *connector,
-			  struct i2c_adapter *adapter)
-{
-	int ret;
-	struct edid *edid;
-
-	edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
-		       GFP_KERNEL);
-	if (edid == NULL) {
-		dev_warn(&connector->dev->pdev->dev,
-			 "Failed to allocate EDID\n");
-		goto end;
-	}
-
-	/* Read first EDID block */
-	ret = drm_ddc_read_edid(connector, adapter,
-				(unsigned char *)edid, EDID_LENGTH);
-	if (ret != 0)
-		goto clean_up;
-
-	/* There are EDID extensions to be read */
-	if (edid->extensions != 0) {
-		int edid_ext_num = edid->extensions;
-
-		if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
-			dev_warn(&connector->dev->pdev->dev,
-				 "The number of extension(%d) is "
-				 "over max (%d), actually read number (%d)\n",
-				 edid_ext_num, DRM_MAX_EDID_EXT_NUM,
-				 DRM_MAX_EDID_EXT_NUM);
-			/* Reset EDID extension number to be read */
-			edid_ext_num = DRM_MAX_EDID_EXT_NUM;
-		}
-		/* Read EDID including extensions too */
-		ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
-					EDID_LENGTH * (edid_ext_num + 1));
-		if (ret != 0)
-			goto clean_up;
-
-	}
-
-	connector->display_info.raw_edid = (char *)edid;
-	goto end;
-
-clean_up:
-	kfree(edid);
-	edid = NULL;
-end:
-	return edid;
-
-}
-EXPORT_SYMBOL(drm_get_edid);
-
 #define HDMI_IDENTIFIER 0x000C03
 #define VENDOR_BLOCK    0x03
 /**
@@ -1273,7 +1559,7 @@
 bool drm_detect_hdmi_monitor(struct edid *edid)
 {
 	char *edid_ext = NULL;
-	int i, hdmi_id, edid_ext_num;
+	int i, hdmi_id;
 	int start_offset, end_offset;
 	bool is_hdmi = false;
 
@@ -1281,19 +1567,15 @@
 	if (edid == NULL || edid->extensions == 0)
 		goto end;
 
-	/* Chose real EDID extension number */
-	edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
-		       DRM_MAX_EDID_EXT_NUM : edid->extensions;
-
 	/* Find CEA extension */
-	for (i = 0; i < edid_ext_num; i++) {
+	for (i = 0; i < edid->extensions; i++) {
 		edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
 		/* This block is CEA extension */
 		if (edid_ext[0] == 0x02)
 			break;
 	}
 
-	if (i == edid_ext_num)
+	if (i == edid->extensions)
 		goto end;
 
 	/* Data block offset in CEA extension block */
@@ -1348,10 +1630,24 @@
 
 	quirks = edid_get_quirks(edid);
 
-	num_modes += add_established_modes(connector, edid);
-	num_modes += add_standard_modes(connector, edid);
+	/*
+	 * EDID spec says modes should be preferred in this order:
+	 * - preferred detailed mode
+	 * - other detailed modes from base block
+	 * - detailed modes from extension blocks
+	 * - CVT 3-byte code modes
+	 * - standard timing codes
+	 * - established timing codes
+	 * - modes inferred from GTF or CVT range information
+	 *
+	 * We don't quite implement this yet, but we're close.
+	 *
+	 * XXX order for additional mode types in extension blocks?
+	 */
 	num_modes += add_detailed_info(connector, edid, quirks);
 	num_modes += add_detailed_info_eedid(connector, edid, quirks);
+	num_modes += add_standard_modes(connector, edid);
+	num_modes += add_established_modes(connector, edid);
 
 	if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
 		edid_fixup_preferred(connector, quirks);
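Note that drm_edid_is_valid() now walks the base block and every extension block, so a caller holding a complete out-of-band EDID image (for example one loaded from a firmware blob) can validate it in a single call; a small sketch, where `blob` and its length are hypothetical:

	struct edid *edid = (struct edid *)blob;	/* EDID_LENGTH * (1 + extensions) bytes */

	if (!drm_edid_is_valid(edid))
		return -EINVAL;	/* bad header, checksum or version on some block */
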
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 288ea2f..b3779d2 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -42,15 +42,33 @@
 
 static LIST_HEAD(kernel_fb_helper_list);
 
-int drm_fb_helper_add_connector(struct drm_connector *connector)
+/* simple single crtc case helper function */
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
 {
-	connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
-	if (!connector->fb_helper_private)
-		return -ENOMEM;
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_connector *connector;
+	int i;
 
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct drm_fb_helper_connector *fb_helper_connector;
+
+		fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+		if (!fb_helper_connector)
+			goto fail;
+
+		fb_helper_connector->connector = connector;
+		fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+	}
 	return 0;
+fail:
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		kfree(fb_helper->connector_info[i]);
+		fb_helper->connector_info[i] = NULL;
+	}
+	fb_helper->connector_count = 0;
+	return -ENOMEM;
 }
-EXPORT_SYMBOL(drm_fb_helper_add_connector);
+EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
 
 /**
  * drm_fb_helper_connector_parse_command_line - parse command line for connector
@@ -65,7 +83,7 @@
  *
  * enable/enable Digital/disable bit at the end
  */
-static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
+static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
 						       const char *mode_option)
 {
 	const char *name;
@@ -75,13 +93,13 @@
 	int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
 	int i;
 	enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
-	struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
 	struct drm_fb_helper_cmdline_mode *cmdline_mode;
+	struct drm_connector *connector = fb_helper_conn->connector;
 
-	if (!fb_help_conn)
+	if (!fb_helper_conn)
 		return false;
 
-	cmdline_mode = &fb_help_conn->cmdline_mode;
+	cmdline_mode = &fb_helper_conn->cmdline_mode;
 	if (!mode_option)
 		mode_option = fb_mode_option;
 
@@ -204,18 +222,21 @@
 	return true;
 }
 
-int drm_fb_helper_parse_command_line(struct drm_device *dev)
+static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
 {
-	struct drm_connector *connector;
+	struct drm_fb_helper_connector *fb_helper_conn;
+	int i;
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	for (i = 0; i < fb_helper->connector_count; i++) {
 		char *option = NULL;
 
+		fb_helper_conn = fb_helper->connector_info[i];
+
 		/* do something on return - turn off connector maybe */
-		if (fb_get_options(drm_get_connector_name(connector), &option))
+		if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option))
 			continue;
 
-		drm_fb_helper_connector_parse_command_line(connector, option);
+		drm_fb_helper_connector_parse_command_line(fb_helper_conn, option);
 	}
 	return 0;
 }
@@ -293,6 +314,7 @@
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_crtc *crtc;
+	struct drm_crtc_helper_funcs *crtc_funcs;
 	struct drm_encoder *encoder;
 	int i;
 
@@ -300,33 +322,28 @@
 	 * For each CRTC in this fb, turn the crtc on then,
 	 * find all associated encoders and turn them on.
 	 */
+	mutex_lock(&dev->mode_config.mutex);
 	for (i = 0; i < fb_helper->crtc_count; i++) {
-		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-			struct drm_crtc_helper_funcs *crtc_funcs =
-				crtc->helper_private;
+		crtc = fb_helper->crtc_info[i].mode_set.crtc;
+		crtc_funcs = crtc->helper_private;
 
-			/* Only mess with CRTCs in this fb */
-			if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
-			    !crtc->enabled)
-				continue;
+		if (!crtc->enabled)
+			continue;
 
-			mutex_lock(&dev->mode_config.mutex);
-			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-			mutex_unlock(&dev->mode_config.mutex);
+		crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
 
-			/* Found a CRTC on this fb, now find encoders */
-			list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-				if (encoder->crtc == crtc) {
-					struct drm_encoder_helper_funcs *encoder_funcs;
 
-					encoder_funcs = encoder->helper_private;
-					mutex_lock(&dev->mode_config.mutex);
-					encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
-					mutex_unlock(&dev->mode_config.mutex);
-				}
+		/* Found a CRTC on this fb, now find encoders */
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+			if (encoder->crtc == crtc) {
+				struct drm_encoder_helper_funcs *encoder_funcs;
+
+				encoder_funcs = encoder->helper_private;
+				encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
 			}
 		}
 	}
+	mutex_unlock(&dev->mode_config.mutex);
 }
 
 static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
@@ -334,6 +351,7 @@
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_crtc *crtc;
+	struct drm_crtc_helper_funcs *crtc_funcs;
 	struct drm_encoder *encoder;
 	int i;
 
@@ -341,32 +359,26 @@
 	 * For each CRTC in this fb, find all associated encoders
 	 * and turn them off, then turn off the CRTC.
 	 */
+	mutex_lock(&dev->mode_config.mutex);
 	for (i = 0; i < fb_helper->crtc_count; i++) {
-		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-			struct drm_crtc_helper_funcs *crtc_funcs =
-				crtc->helper_private;
+		crtc = fb_helper->crtc_info[i].mode_set.crtc;
+		crtc_funcs = crtc->helper_private;
 
-			/* Only mess with CRTCs in this fb */
-			if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
-			    !crtc->enabled)
-				continue;
+		if (!crtc->enabled)
+			continue;
 
-			/* Found a CRTC on this fb, now find encoders */
-			list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-				if (encoder->crtc == crtc) {
-					struct drm_encoder_helper_funcs *encoder_funcs;
+		/* Found a CRTC on this fb, now find encoders */
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+			if (encoder->crtc == crtc) {
+				struct drm_encoder_helper_funcs *encoder_funcs;
 
-					encoder_funcs = encoder->helper_private;
-					mutex_lock(&dev->mode_config.mutex);
-					encoder_funcs->dpms(encoder, dpms_mode);
-					mutex_unlock(&dev->mode_config.mutex);
-				}
+				encoder_funcs = encoder->helper_private;
+				encoder_funcs->dpms(encoder, dpms_mode);
 			}
-			mutex_lock(&dev->mode_config.mutex);
-			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-			mutex_unlock(&dev->mode_config.mutex);
 		}
+		crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 	}
+	mutex_unlock(&dev->mode_config.mutex);
 }
 
 int drm_fb_helper_blank(int blank, struct fb_info *info)
@@ -401,50 +413,81 @@
 {
 	int i;
 
+	for (i = 0; i < helper->connector_count; i++)
+		kfree(helper->connector_info[i]);
+	kfree(helper->connector_info);
 	for (i = 0; i < helper->crtc_count; i++)
 		kfree(helper->crtc_info[i].mode_set.connectors);
 	kfree(helper->crtc_info);
 }
 
-int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count)
+int drm_fb_helper_init(struct drm_device *dev,
+		       struct drm_fb_helper *fb_helper,
+		       int crtc_count, int max_conn_count)
 {
-	struct drm_device *dev = helper->dev;
 	struct drm_crtc *crtc;
 	int ret = 0;
 	int i;
 
-	helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
-	if (!helper->crtc_info)
+	fb_helper->dev = dev;
+
+	INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
+
+	fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+	if (!fb_helper->crtc_info)
 		return -ENOMEM;
 
-	helper->crtc_count = crtc_count;
+	fb_helper->crtc_count = crtc_count;
+	fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
+	if (!fb_helper->connector_info) {
+		kfree(fb_helper->crtc_info);
+		return -ENOMEM;
+	}
+	fb_helper->connector_count = 0;
 
 	for (i = 0; i < crtc_count; i++) {
-		helper->crtc_info[i].mode_set.connectors =
+		fb_helper->crtc_info[i].mode_set.connectors =
 			kcalloc(max_conn_count,
 				sizeof(struct drm_connector *),
 				GFP_KERNEL);
 
-		if (!helper->crtc_info[i].mode_set.connectors) {
+		if (!fb_helper->crtc_info[i].mode_set.connectors) {
 			ret = -ENOMEM;
 			goto out_free;
 		}
-		helper->crtc_info[i].mode_set.num_connectors = 0;
+		fb_helper->crtc_info[i].mode_set.num_connectors = 0;
 	}
 
 	i = 0;
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		helper->crtc_info[i].crtc_id = crtc->base.id;
-		helper->crtc_info[i].mode_set.crtc = crtc;
+		fb_helper->crtc_info[i].crtc_id = crtc->base.id;
+		fb_helper->crtc_info[i].mode_set.crtc = crtc;
 		i++;
 	}
-	helper->conn_limit = max_conn_count;
+	fb_helper->conn_limit = max_conn_count;
 	return 0;
 out_free:
-	drm_fb_helper_crtc_free(helper);
+	drm_fb_helper_crtc_free(fb_helper);
 	return -ENOMEM;
 }
-EXPORT_SYMBOL(drm_fb_helper_init_crtc_count);
+EXPORT_SYMBOL(drm_fb_helper_init);
+
+void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+{
+	if (!list_empty(&fb_helper->kernel_fb_list)) {
+		list_del(&fb_helper->kernel_fb_list);
+		if (list_empty(&kernel_fb_helper_list)) {
+			printk(KERN_INFO "drm: unregistered panic notifier\n");
+			atomic_notifier_chain_unregister(&panic_notifier_list,
+							 &paniced);
+			unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+		}
+	}
+
+	drm_fb_helper_crtc_free(fb_helper);
+
+}
+EXPORT_SYMBOL(drm_fb_helper_fini);
 
 static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
 		     u16 blue, u16 regno, struct fb_info *info)
@@ -508,20 +551,15 @@
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
 {
 	struct drm_fb_helper *fb_helper = info->par;
-	struct drm_device *dev = fb_helper->dev;
+	struct drm_crtc_helper_funcs *crtc_funcs;
 	u16 *red, *green, *blue, *transp;
 	struct drm_crtc *crtc;
 	int i, rc = 0;
 	int start;
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-		for (i = 0; i < fb_helper->crtc_count; i++) {
-			if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
-				break;
-		}
-		if (i == fb_helper->crtc_count)
-			continue;
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		crtc = fb_helper->crtc_info[i].mode_set.crtc;
+		crtc_funcs = crtc->helper_private;
 
 		red = cmap->red;
 		green = cmap->green;
@@ -549,41 +587,6 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_setcmap);
 
-int drm_fb_helper_setcolreg(unsigned regno,
-			    unsigned red,
-			    unsigned green,
-			    unsigned blue,
-			    unsigned transp,
-			    struct fb_info *info)
-{
-	struct drm_fb_helper *fb_helper = info->par;
-	struct drm_device *dev = fb_helper->dev;
-	struct drm_crtc *crtc;
-	int i;
-	int ret;
-
-	if (regno > 255)
-		return 1;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-		for (i = 0; i < fb_helper->crtc_count; i++) {
-			if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
-				break;
-		}
-		if (i == fb_helper->crtc_count)
-			continue;
-
-		ret = setcolreg(crtc, red, green, blue, regno, info);
-		if (ret)
-			return ret;
-
-		crtc_funcs->load_lut(crtc);
-	}
-	return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_setcolreg);
-
 int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 			    struct fb_info *info)
 {
@@ -687,23 +690,21 @@
 		return -EINVAL;
 	}
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
-		for (i = 0; i < fb_helper->crtc_count; i++) {
-			if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
-				break;
-		}
-		if (i == fb_helper->crtc_count)
-			continue;
-
-		if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
-			mutex_lock(&dev->mode_config.mutex);
-			ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+	mutex_lock(&dev->mode_config.mutex);
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		crtc = fb_helper->crtc_info[i].mode_set.crtc;
+		ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+		if (ret) {
 			mutex_unlock(&dev->mode_config.mutex);
-			if (ret)
-				return ret;
+			return ret;
 		}
 	}
+	mutex_unlock(&dev->mode_config.mutex);
+
+	if (fb_helper->delayed_hotplug) {
+		fb_helper->delayed_hotplug = false;
+		drm_fb_helper_hotplug_event(fb_helper);
+	}
 	return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_set_par);
@@ -718,14 +719,9 @@
 	int ret = 0;
 	int i;
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		for (i = 0; i < fb_helper->crtc_count; i++) {
-			if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
-				break;
-		}
-
-		if (i == fb_helper->crtc_count)
-			continue;
+	mutex_lock(&dev->mode_config.mutex);
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		crtc = fb_helper->crtc_info[i].mode_set.crtc;
 
 		modeset = &fb_helper->crtc_info[i].mode_set;
 
@@ -733,209 +729,138 @@
 		modeset->y = var->yoffset;
 
 		if (modeset->num_connectors) {
-			mutex_lock(&dev->mode_config.mutex);
 			ret = crtc->funcs->set_config(modeset);
-			mutex_unlock(&dev->mode_config.mutex);
 			if (!ret) {
 				info->var.xoffset = var->xoffset;
 				info->var.yoffset = var->yoffset;
 			}
 		}
 	}
+	mutex_unlock(&dev->mode_config.mutex);
 	return ret;
 }
 EXPORT_SYMBOL(drm_fb_helper_pan_display);
 
-int drm_fb_helper_single_fb_probe(struct drm_device *dev,
-				  int preferred_bpp,
-				  int (*fb_create)(struct drm_device *dev,
-						   uint32_t fb_width,
-						   uint32_t fb_height,
-						   uint32_t surface_width,
-						   uint32_t surface_height,
-						   uint32_t surface_depth,
-						   uint32_t surface_bpp,
-						   struct drm_framebuffer **fb_ptr))
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+				  int preferred_bpp)
 {
-	struct drm_crtc *crtc;
-	struct drm_connector *connector;
-	unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
-	unsigned int surface_width = 0, surface_height = 0;
 	int new_fb = 0;
 	int crtc_count = 0;
-	int ret, i, conn_count = 0;
+	int i;
 	struct fb_info *info;
-	struct drm_framebuffer *fb;
-	struct drm_mode_set *modeset = NULL;
-	struct drm_fb_helper *fb_helper;
-	uint32_t surface_depth = 24, surface_bpp = 32;
+	struct drm_fb_helper_surface_size sizes;
+	int gamma_size = 0;
+
+	memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
+	sizes.surface_depth = 24;
+	sizes.surface_bpp = 32;
+	sizes.fb_width = (unsigned)-1;
+	sizes.fb_height = (unsigned)-1;
 
 	/* if driver picks 8 or 16 by default use that
 	   for both depth/bpp */
-	if (preferred_bpp != surface_bpp) {
-		surface_depth = surface_bpp = preferred_bpp;
+	if (preferred_bpp != sizes.surface_bpp) {
+		sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
 	}
 	/* first up get a count of crtcs now in use and new min/maxes width/heights */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
-
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
 		struct drm_fb_helper_cmdline_mode *cmdline_mode;
 
-		if (!fb_help_conn)
-			continue;
-		
-		cmdline_mode = &fb_help_conn->cmdline_mode;
+		cmdline_mode = &fb_helper_conn->cmdline_mode;
 
 		if (cmdline_mode->bpp_specified) {
 			switch (cmdline_mode->bpp) {
 			case 8:
-				surface_depth = surface_bpp = 8;
+				sizes.surface_depth = sizes.surface_bpp = 8;
 				break;
 			case 15:
-				surface_depth = 15;
-				surface_bpp = 16;
+				sizes.surface_depth = 15;
+				sizes.surface_bpp = 16;
 				break;
 			case 16:
-				surface_depth = surface_bpp = 16;
+				sizes.surface_depth = sizes.surface_bpp = 16;
 				break;
 			case 24:
-				surface_depth = surface_bpp = 24;
+				sizes.surface_depth = sizes.surface_bpp = 24;
 				break;
 			case 32:
-				surface_depth = 24;
-				surface_bpp = 32;
+				sizes.surface_depth = 24;
+				sizes.surface_bpp = 32;
 				break;
 			}
 			break;
 		}
 	}
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		if (drm_helper_crtc_in_use(crtc)) {
-			if (crtc->desired_mode) {
-				if (crtc->desired_mode->hdisplay < fb_width)
-					fb_width = crtc->desired_mode->hdisplay;
+	crtc_count = 0;
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		struct drm_display_mode *desired_mode;
+		desired_mode = fb_helper->crtc_info[i].desired_mode;
 
-				if (crtc->desired_mode->vdisplay < fb_height)
-					fb_height = crtc->desired_mode->vdisplay;
-
-				if (crtc->desired_mode->hdisplay > surface_width)
-					surface_width = crtc->desired_mode->hdisplay;
-
-				if (crtc->desired_mode->vdisplay > surface_height)
-					surface_height = crtc->desired_mode->vdisplay;
-			}
+		if (desired_mode) {
+			if (gamma_size == 0)
+				gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+			if (desired_mode->hdisplay < sizes.fb_width)
+				sizes.fb_width = desired_mode->hdisplay;
+			if (desired_mode->vdisplay < sizes.fb_height)
+				sizes.fb_height = desired_mode->vdisplay;
+			if (desired_mode->hdisplay > sizes.surface_width)
+				sizes.surface_width = desired_mode->hdisplay;
+			if (desired_mode->vdisplay > sizes.surface_height)
+				sizes.surface_height = desired_mode->vdisplay;
 			crtc_count++;
 		}
 	}
 
-	if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
+	if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
 		/* hmm everyone went away - assume VGA cable just fell out
 		   and will come back later. */
-		return 0;
+		DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
+		sizes.fb_width = sizes.surface_width = 1024;
+		sizes.fb_height = sizes.surface_height = 768;
 	}
 
-	/* do we have an fb already? */
-	if (list_empty(&dev->mode_config.fb_kernel_list)) {
-		ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
-				   surface_height, surface_depth, surface_bpp,
-				   &fb);
-		if (ret)
-			return -EINVAL;
-		new_fb = 1;
-	} else {
-		fb = list_first_entry(&dev->mode_config.fb_kernel_list,
-				      struct drm_framebuffer, filp_head);
+	/* push down into drivers */
+	new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+	if (new_fb < 0)
+		return new_fb;
 
-		/* if someone hotplugs something bigger than we have already allocated, we are pwned.
-		   As really we can't resize an fbdev that is in the wild currently due to fbdev
-		   not really being designed for the lower layers moving stuff around under it.
-		   - so in the grand style of things - punt. */
-		if ((fb->width < surface_width) ||
-		    (fb->height < surface_height)) {
-			DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
-			return -EINVAL;
-		}
+	info = fb_helper->fbdev;
+
+	/* set the fb pointer */
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
 	}
 
-	info = fb->fbdev;
-	fb_helper = info->par;
-
-	crtc_count = 0;
-	/* okay we need to setup new connector sets in the crtcs */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		modeset = &fb_helper->crtc_info[crtc_count].mode_set;
-		modeset->fb = fb;
-		conn_count = 0;
-		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-			if (connector->encoder)
-				if (connector->encoder->crtc == modeset->crtc) {
-					modeset->connectors[conn_count] = connector;
-					conn_count++;
-					if (conn_count > fb_helper->conn_limit)
-						BUG();
-				}
-		}
-
-		for (i = conn_count; i < fb_helper->conn_limit; i++)
-			modeset->connectors[i] = NULL;
-
-		modeset->crtc = crtc;
-		crtc_count++;
-
-		modeset->num_connectors = conn_count;
-		if (modeset->crtc->desired_mode) {
-			if (modeset->mode)
-				drm_mode_destroy(dev, modeset->mode);
-			modeset->mode = drm_mode_duplicate(dev,
-							   modeset->crtc->desired_mode);
-		}
-	}
-	fb_helper->crtc_count = crtc_count;
-	fb_helper->fb = fb;
-
 	if (new_fb) {
 		info->var.pixclock = 0;
-		ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
-		if (ret)
-			return ret;
 		if (register_framebuffer(info) < 0) {
-			fb_dealloc_cmap(&info->cmap);
 			return -EINVAL;
 		}
+
+		printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+		       info->fix.id);
+
 	} else {
 		drm_fb_helper_set_par(info);
 	}
-	printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
-	       info->fix.id);
 
 	/* Switch back to kernel console on panic */
 	/* multi card linked list maybe */
 	if (list_empty(&kernel_fb_helper_list)) {
-		printk(KERN_INFO "registered panic notifier\n");
+		printk(KERN_INFO "drm: registered panic notifier\n");
 		atomic_notifier_chain_register(&panic_notifier_list,
 					       &paniced);
 		register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
 	}
-	list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+	if (new_fb)
+		list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
 
-void drm_fb_helper_free(struct drm_fb_helper *helper)
-{
-	list_del(&helper->kernel_fb_list);
-	if (list_empty(&kernel_fb_helper_list)) {
-		printk(KERN_INFO "unregistered panic notifier\n");
-		atomic_notifier_chain_unregister(&panic_notifier_list,
-						 &paniced);
-		unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
-	}
-	drm_fb_helper_crtc_free(helper);
-	fb_dealloc_cmap(&helper->fb->fbdev->cmap);
-}
-EXPORT_SYMBOL(drm_fb_helper_free);
-
 void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
 			    uint32_t depth)
 {
@@ -954,10 +879,11 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_fill_fix);
 
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
 			    uint32_t fb_width, uint32_t fb_height)
 {
-	info->pseudo_palette = fb->pseudo_palette;
+	struct drm_framebuffer *fb = fb_helper->fb;
+	info->pseudo_palette = fb_helper->pseudo_palette;
 	info->var.xres_virtual = fb->width;
 	info->var.yres_virtual = fb->height;
 	info->var.bits_per_pixel = fb->bits_per_pixel;
@@ -1025,3 +951,457 @@
 	info->var.yres = fb_height;
 }
 EXPORT_SYMBOL(drm_fb_helper_fill_var);
+
+static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
+					       uint32_t maxX,
+					       uint32_t maxY)
+{
+	struct drm_connector *connector;
+	int count = 0;
+	int i;
+
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		connector = fb_helper->connector_info[i]->connector;
+		count += connector->funcs->fill_modes(connector, maxX, maxY);
+	}
+
+	return count;
+}
+
+static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+{
+	struct drm_display_mode *mode;
+
+	list_for_each_entry(mode, &fb_connector->connector->modes, head) {
+		if (drm_mode_width(mode) > width ||
+		    drm_mode_height(mode) > height)
+			continue;
+		if (mode->type & DRM_MODE_TYPE_PREFERRED)
+			return mode;
+	}
+	return NULL;
+}
+
+static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
+{
+	struct drm_fb_helper_cmdline_mode *cmdline_mode;
+	cmdline_mode = &fb_connector->cmdline_mode;
+	return cmdline_mode->specified;
+}
+
+static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+						      int width, int height)
+{
+	struct drm_fb_helper_cmdline_mode *cmdline_mode;
+	struct drm_display_mode *mode = NULL;
+
+	cmdline_mode = &fb_helper_conn->cmdline_mode;
+	if (cmdline_mode->specified == false)
+		return mode;
+
+	/* attempt to find a matching mode in the list of modes
+	 * we have gotten so far; if not, add a CVT mode that conforms
+	 */
+	if (cmdline_mode->rb || cmdline_mode->margins)
+		goto create_mode;
+
+	list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+		/* check width/height */
+		if (mode->hdisplay != cmdline_mode->xres ||
+		    mode->vdisplay != cmdline_mode->yres)
+			continue;
+
+		if (cmdline_mode->refresh_specified) {
+			if (mode->vrefresh != cmdline_mode->refresh)
+				continue;
+		}
+
+		if (cmdline_mode->interlace) {
+			if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+				continue;
+		}
+		return mode;
+	}
+
+create_mode:
+	mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres,
+			    cmdline_mode->yres,
+			    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+			    cmdline_mode->rb, cmdline_mode->interlace,
+			    cmdline_mode->margins);
+	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+	list_add(&mode->head, &fb_helper_conn->connector->modes);
+	return mode;
+}
+
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+	bool enable;
+
+	if (strict) {
+		enable = connector->status == connector_status_connected;
+	} else {
+		enable = connector->status != connector_status_disconnected;
+	}
+	return enable;
+}
+
+static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
+				  bool *enabled)
+{
+	bool any_enabled = false;
+	struct drm_connector *connector;
+	int i = 0;
+
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		connector = fb_helper->connector_info[i]->connector;
+		enabled[i] = drm_connector_enabled(connector, true);
+		DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
+			  enabled[i] ? "yes" : "no");
+		any_enabled |= enabled[i];
+	}
+
+	if (any_enabled)
+		return;
+
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		connector = fb_helper->connector_info[i]->connector;
+		enabled[i] = drm_connector_enabled(connector, false);
+	}
+}
+
+static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
+			      struct drm_display_mode **modes,
+			      bool *enabled, int width, int height)
+{
+	int count, i, j;
+	bool can_clone = false;
+	struct drm_fb_helper_connector *fb_helper_conn;
+	struct drm_display_mode *dmt_mode, *mode;
+
+	/* only contemplate cloning in the single crtc case */
+	if (fb_helper->crtc_count > 1)
+		return false;
+
+	count = 0;
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		if (enabled[i])
+			count++;
+	}
+
+	/* only contemplate cloning if more than one connector is enabled */
+	if (count <= 1)
+		return false;
+
+	/* check the command line; if nothing in common, pick 1024x768 */
+	can_clone = true;
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		if (!enabled[i])
+			continue;
+		fb_helper_conn = fb_helper->connector_info[i];
+		modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+		if (!modes[i]) {
+			can_clone = false;
+			break;
+		}
+		for (j = 0; j < i; j++) {
+			if (!enabled[j])
+				continue;
+			if (!drm_mode_equal(modes[j], modes[i]))
+				can_clone = false;
+		}
+	}
+
+	if (can_clone) {
+		DRM_DEBUG_KMS("can clone using command line\n");
+		return true;
+	}
+
+	/* try and find a 1024x768 mode on each connector */
+	can_clone = true;
+	dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
+
+	for (i = 0; i < fb_helper->connector_count; i++) {
+
+		if (!enabled[i])
+			continue;
+
+		fb_helper_conn = fb_helper->connector_info[i];
+		list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+			if (drm_mode_equal(mode, dmt_mode))
+				modes[i] = mode;
+		}
+		if (!modes[i])
+			can_clone = false;
+	}
+
+	if (can_clone) {
+		DRM_DEBUG_KMS("can clone using 1024x768\n");
+		return true;
+	}
+	DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+	return false;
+}
+
+static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
+				 struct drm_display_mode **modes,
+				 bool *enabled, int width, int height)
+{
+	struct drm_fb_helper_connector *fb_helper_conn;
+	int i;
+
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		fb_helper_conn = fb_helper->connector_info[i];
+
+		if (enabled[i] == false)
+			continue;
+
+		DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+			      fb_helper_conn->connector->base.id);
+
+		/* go for the command line mode first */
+		modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+		if (!modes[i]) {
+			DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+				      fb_helper_conn->connector->base.id);
+			modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+		}
+		/* No preferred modes, pick one off the list */
+		if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
+			list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
+				break;
+		}
+		DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+			  "none");
+	}
+	return true;
+}
+
+static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+			  struct drm_fb_helper_crtc **best_crtcs,
+			  struct drm_display_mode **modes,
+			  int n, int width, int height)
+{
+	int c, o;
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_connector *connector;
+	struct drm_connector_helper_funcs *connector_funcs;
+	struct drm_encoder *encoder;
+	struct drm_fb_helper_crtc *best_crtc;
+	int my_score, best_score, score;
+	struct drm_fb_helper_crtc **crtcs, *crtc;
+	struct drm_fb_helper_connector *fb_helper_conn;
+
+	if (n == fb_helper->connector_count)
+		return 0;
+
+	fb_helper_conn = fb_helper->connector_info[n];
+	connector = fb_helper_conn->connector;
+
+	best_crtcs[n] = NULL;
+	best_crtc = NULL;
+	best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
+	if (modes[n] == NULL)
+		return best_score;
+
+	crtcs = kzalloc(dev->mode_config.num_connector *
+			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+	if (!crtcs)
+		return best_score;
+
+	my_score = 1;
+	if (connector->status == connector_status_connected)
+		my_score++;
+	if (drm_has_cmdline_mode(fb_helper_conn))
+		my_score++;
+	if (drm_has_preferred_mode(fb_helper_conn, width, height))
+		my_score++;
+
+	connector_funcs = connector->helper_private;
+	encoder = connector_funcs->best_encoder(connector);
+	if (!encoder)
+		goto out;
+
+	/* select a crtc for this connector and then attempt to configure
+	   remaining connectors */
+	for (c = 0; c < fb_helper->crtc_count; c++) {
+		crtc = &fb_helper->crtc_info[c];
+
+		if ((encoder->possible_crtcs & (1 << c)) == 0) {
+			continue;
+		}
+
+		for (o = 0; o < n; o++)
+			if (best_crtcs[o] == crtc)
+				break;
+
+		if (o < n) {
+			/* ignore cloning unless only a single crtc */
+			if (fb_helper->crtc_count > 1)
+				continue;
+
+			if (!drm_mode_equal(modes[o], modes[n]))
+				continue;
+		}
+
+		crtcs[n] = crtc;
+		memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
+		score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
+						  width, height);
+		if (score > best_score) {
+			best_crtc = crtc;
+			best_score = score;
+			memcpy(best_crtcs, crtcs,
+			       dev->mode_config.num_connector *
+			       sizeof(struct drm_fb_helper_crtc *));
+		}
+	}
+out:
+	kfree(crtcs);
+	return best_score;
+}
+
+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+{
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_fb_helper_crtc **crtcs;
+	struct drm_display_mode **modes;
+	struct drm_encoder *encoder;
+	struct drm_mode_set *modeset;
+	bool *enabled;
+	int width, height;
+	int i, ret;
+
+	DRM_DEBUG_KMS("\n");
+
+	width = dev->mode_config.max_width;
+	height = dev->mode_config.max_height;
+
+	/* clean out all the encoder/crtc combos */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		encoder->crtc = NULL;
+	}
+
+	crtcs = kcalloc(dev->mode_config.num_connector,
+			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+	modes = kcalloc(dev->mode_config.num_connector,
+			sizeof(struct drm_display_mode *), GFP_KERNEL);
+	enabled = kcalloc(dev->mode_config.num_connector,
+			  sizeof(bool), GFP_KERNEL);
+
+	drm_enable_connectors(fb_helper, enabled);
+
+	ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
+	if (!ret) {
+		ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
+		if (!ret)
+			DRM_ERROR("Unable to find initial modes\n");
+	}
+
+	DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
+
+	drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+
+	/* need to set the modesets up here for use later */
+	/* fill out the connector<->crtc mappings into the modesets */
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		modeset = &fb_helper->crtc_info[i].mode_set;
+		modeset->num_connectors = 0;
+	}
+
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		struct drm_display_mode *mode = modes[i];
+		struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+
+		if (mode && fb_crtc) {
+			modeset = &fb_crtc->mode_set;
+			DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
+				      mode->name, fb_crtc->mode_set.crtc->base.id);
+			fb_crtc->desired_mode = mode;
+			if (modeset->mode)
+				drm_mode_destroy(dev, modeset->mode);
+			modeset->mode = drm_mode_duplicate(dev,
+							   fb_crtc->desired_mode);
+			modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+		}
+	}
+
+	kfree(crtcs);
+	kfree(modes);
+	kfree(enabled);
+}
+
+/**
+ * drm_fb_helper_initial_config - setup a sane initial connector configuration
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
+ *
+ * LOCKING:
+ * Called at init time, must take mode config lock.
+ *
+ * Scan the CRTCs and connectors and try to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+{
+	struct drm_device *dev = fb_helper->dev;
+	int count = 0;
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(fb_helper->dev);
+
+	drm_fb_helper_parse_command_line(fb_helper);
+
+	count = drm_fb_helper_probe_connector_modes(fb_helper,
+						    dev->mode_config.max_width,
+						    dev->mode_config.max_height);
+	/*
+	 * we shouldn't end up with no modes here.
+	 */
+	if (count == 0) {
+		printk(KERN_INFO "No connectors reported connected with modes\n");
+	}
+	drm_setup_crtcs(fb_helper);
+
+	return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_initial_config);
+
+bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+	int count = 0;
+	u32 max_width, max_height, bpp_sel;
+	bool bound = false, crtcs_bound = false;
+	struct drm_crtc *crtc;
+
+	if (!fb_helper->fb)
+		return false;
+
+	list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) {
+		if (crtc->fb)
+			crtcs_bound = true;
+		if (crtc->fb == fb_helper->fb)
+			bound = true;
+	}
+
+	if (!bound && crtcs_bound) {
+		fb_helper->delayed_hotplug = true;
+		return false;
+	}
+	DRM_DEBUG_KMS("\n");
+
+	max_width = fb_helper->fb->width;
+	max_height = fb_helper->fb->height;
+	bpp_sel = fb_helper->fb->bits_per_pixel;
+
+	count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
+						    max_height);
+	drm_setup_crtcs(fb_helper);
+
+	return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
+
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 9d532d7..e7aace2 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -243,11 +243,10 @@
 
 	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
 
-	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	memset(priv, 0, sizeof(*priv));
 	filp->private_data = priv;
 	priv->filp = filp;
 	priv->uid = current_euid();
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index aa89d4b..33dad3f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -124,6 +124,31 @@
 }
 
 /**
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store.
+ */
+int drm_gem_object_init(struct drm_device *dev,
+			struct drm_gem_object *obj, size_t size)
+{
+	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+	obj->dev = dev;
+	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+	if (IS_ERR(obj->filp))
+		return -ENOMEM;
+
+	kref_init(&obj->refcount);
+	kref_init(&obj->handlecount);
+	obj->size = size;
+
+	atomic_inc(&dev->object_count);
+	atomic_add(obj->size, &dev->object_memory);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_object_init);
+
+/**
  * Allocate a GEM object of the specified size with shmfs backing store
  */
 struct drm_gem_object *
@@ -131,28 +156,22 @@
 {
 	struct drm_gem_object *obj;
 
-	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (!obj)
 		goto free;
 
-	obj->dev = dev;
-	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
-	if (IS_ERR(obj->filp))
+	if (drm_gem_object_init(dev, obj, size) != 0)
 		goto free;
 
-	kref_init(&obj->refcount);
-	kref_init(&obj->handlecount);
-	obj->size = size;
 	if (dev->driver->gem_init_object != NULL &&
 	    dev->driver->gem_init_object(obj) != 0) {
 		goto fput;
 	}
-	atomic_inc(&dev->object_count);
-	atomic_add(obj->size, &dev->object_memory);
 	return obj;
 fput:
+	/* Object_init mangles the global counters - readjust them. */
+	atomic_dec(&dev->object_count);
+	atomic_sub(obj->size, &dev->object_memory);
 	fput(obj->filp);
 free:
 	kfree(obj);
@@ -403,15 +422,15 @@
 	idr_destroy(&file_private->object_idr);
 }
 
-static void
-drm_gem_object_free_common(struct drm_gem_object *obj)
+void
+drm_gem_object_release(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	fput(obj->filp);
 	atomic_dec(&dev->object_count);
 	atomic_sub(obj->size, &dev->object_memory);
-	kfree(obj);
 }
+EXPORT_SYMBOL(drm_gem_object_release);
 
 /**
  * Called after the last reference to the object has been lost.
@@ -429,8 +448,6 @@
 
 	if (dev->driver->gem_free_object != NULL)
 		dev->driver->gem_free_object(obj);
-
-	drm_gem_object_free_common(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
@@ -453,8 +470,6 @@
 		dev->driver->gem_free_object(obj);
 		mutex_unlock(&dev->struct_mutex);
 	}
-
-	drm_gem_object_free_common(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_free_unlocked);
 
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 76d6339..f1f473e 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -258,8 +258,10 @@
 	drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
 	/* 18/16. Find actual vertical frame frequency */
 	/* ignore - just set the mode flag for interlaced */
-	if (interlaced)
+	if (interlaced) {
 		drm_mode->vtotal *= 2;
+		drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	}
 	/* Fill the mode line name */
 	drm_mode_set_name(drm_mode);
 	if (reduced)
@@ -268,43 +270,35 @@
 	else
 		drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
 					DRM_MODE_FLAG_NHSYNC);
-	if (interlaced)
-		drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
 
-    return drm_mode;
+	return drm_mode;
 }
 EXPORT_SYMBOL(drm_cvt_mode);
 
 /**
- * drm_gtf_mode - create the modeline based on GTF algorithm
+ * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
  *
  * @dev		:drm device
  * @hdisplay	:hdisplay size
  * @vdisplay	:vdisplay size
  * @vrefresh	:vrefresh rate.
  * @interlaced	:whether the interlace is supported
- * @margins	:whether the margin is supported
+ * @margins	:desired margin size
+ * @GTF_[MCKJ]  :extended GTF formula parameters
  *
  * LOCKING.
  * none.
  *
- * return the modeline based on GTF algorithm
+ * return the modeline based on full GTF algorithm.
  *
- * This function is to create the modeline based on the GTF algorithm.
- * Generalized Timing Formula is derived from:
- *	GTF Spreadsheet by Andy Morrish (1/5/97)
- *	available at http://www.vesa.org
- *
- * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
- * What I have done is to translate it by using integer calculation.
- * I also refer to the function of fb_get_mode in the file of
- * drivers/video/fbmon.c
+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
+ * in here multiplied by two.  For a C of 40, pass in 80.
  */
-struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
-				      int vdisplay, int vrefresh,
-				      bool interlaced, int margins)
-{
-	/* 1) top/bottom margin size (% of height) - default: 1.8, */
+struct drm_display_mode *
+drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
+		     int vrefresh, bool interlaced, int margins,
+		     int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
+{	/* 1) top/bottom margin size (% of height) - default: 1.8, */
 #define	GTF_MARGIN_PERCENTAGE		18
 	/* 2) character cell horizontal granularity (pixels) - default 8 */
 #define	GTF_CELL_GRAN			8
@@ -316,17 +310,9 @@
 #define H_SYNC_PERCENT			8
 	/* min time of vsync + back porch (microsec) */
 #define MIN_VSYNC_PLUS_BP		550
-	/* blanking formula gradient */
-#define GTF_M				600
-	/* blanking formula offset */
-#define GTF_C				40
-	/* blanking formula scaling factor */
-#define GTF_K				128
-	/* blanking formula scaling factor */
-#define GTF_J				20
 	/* C' and M' are part of the Blanking Duty Cycle computation */
-#define GTF_C_PRIME		(((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
-#define GTF_M_PRIME		(GTF_K * GTF_M / 256)
+#define GTF_C_PRIME	((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
+#define GTF_M_PRIME	(GTF_K * GTF_M / 256)
 	struct drm_display_mode *drm_mode;
 	unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
 	int top_margin, bottom_margin;
@@ -460,17 +446,61 @@
 
 	drm_mode->clock = pixel_freq;
 
-	drm_mode_set_name(drm_mode);
-	drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
-
 	if (interlaced) {
 		drm_mode->vtotal *= 2;
 		drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
 	}
 
+	drm_mode_set_name(drm_mode);
+	if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
+		drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+	else
+		drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
+
 	return drm_mode;
 }
+EXPORT_SYMBOL(drm_gtf_mode_complex);
+
+/**
+ * drm_gtf_mode - create the modeline based on GTF algorithm
+ *
+ * @dev		:drm device
+ * @hdisplay	:hdisplay size
+ * @vdisplay	:vdisplay size
+ * @vrefresh	:vrefresh rate.
+ * @interlaced	:whether the interlace is supported
+ * @margins	:whether the margin is supported
+ *
+ * LOCKING.
+ * none.
+ *
+ * return the modeline based on GTF algorithm
+ *
+ * This function creates a modeline based on the GTF algorithm.
+ * The Generalized Timing Formula is derived from:
+ *	GTF Spreadsheet by Andy Morrish (1/5/97)
+ *	available at http://www.vesa.org
+ *
+ * It is adapted from xserver/hw/xfree86/modes/xf86gtf.c,
+ * translated to use integer calculation throughout.
+ * It also draws on the fb_get_mode function in
+ * drivers/video/fbmon.c
+ *
+ * Standard GTF parameters:
+ * M = 600
+ * C = 40
+ * K = 128
+ * J = 20
+ */
+struct drm_display_mode *
+drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
+	     bool lace, int margins)
+{
+	return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
+				    margins, 600, 40 * 2, 128, 20 * 2);
+}
 EXPORT_SYMBOL(drm_gtf_mode);
+
 /**
  * drm_mode_set_name - set the name on a mode
  * @mode: name will be set in this mode
@@ -482,8 +512,11 @@
  */
 void drm_mode_set_name(struct drm_display_mode *mode)
 {
-	snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
-		 mode->vdisplay);
+	bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+	snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+		 mode->hdisplay, mode->vdisplay,
+		 interlaced ? "i" : "");
 }
 EXPORT_SYMBOL(drm_mode_set_name);
 
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 25bbd30..101d381 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -193,8 +193,9 @@
 			"disabled");
 }
 
-static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
-			 char *buf, loff_t off, size_t count)
+static ssize_t edid_show(struct file *filp, struct kobject *kobj,
+			 struct bin_attribute *attr, char *buf, loff_t off,
+			 size_t count)
 {
 	struct device *connector_dev = container_of(kobj, struct device, kobj);
 	struct drm_connector *connector = to_drm_connector(connector_dev);
@@ -333,7 +334,7 @@
 static struct bin_attribute edid_attr = {
 	.attr.name = "edid",
 	.attr.mode = 0444,
-	.size = 128,
+	.size = 0,
 	.read = edid_show,
 };
 
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9929f84..9563901 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -33,3 +33,5 @@
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 
 obj-$(CONFIG_DRM_I915)  += i915.o
+
+CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 288fc50..0d6ff64 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -70,16 +70,6 @@
 	void (*dpms)(struct intel_dvo_device *dvo, int mode);
 
 	/*
-	 * Saves the output's state for restoration on VT switch.
-	 */
-	void (*save)(struct intel_dvo_device *dvo);
-
-	/*
-	 * Restore's the output's state at VT switch.
-	 */
-	void (*restore)(struct intel_dvo_device *dvo);
-
-	/*
 	 * Callback for testing a video mode for a given output.
 	 *
 	 * This function should only check for cases where a mode can't
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1184c14..14d5980 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -159,16 +159,7 @@
 #define CH7017_BANG_LIMIT_CONTROL	0x7f
 
 struct ch7017_priv {
-	uint8_t save_hapi;
-	uint8_t save_vali;
-	uint8_t save_valo;
-	uint8_t save_ailo;
-	uint8_t save_lvds_pll_vco;
-	uint8_t save_feedback_div;
-	uint8_t save_lvds_control_2;
-	uint8_t save_outputs_enable;
-	uint8_t save_lvds_power_down;
-	uint8_t save_power_management;
+	uint8_t dummy;
 };
 
 static void ch7017_dump_regs(struct intel_dvo_device *dvo);
@@ -401,39 +392,6 @@
 	DUMP(CH7017_LVDS_POWER_DOWN);
 }
 
-static void ch7017_save(struct intel_dvo_device *dvo)
-{
-	struct ch7017_priv *priv = dvo->dev_priv;
-
-	ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi);
-	ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo);
-	ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo);
-	ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco);
-	ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div);
-	ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2);
-	ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable);
-	ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down);
-	ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management);
-}
-
-static void ch7017_restore(struct intel_dvo_device *dvo)
-{
-	struct ch7017_priv *priv = dvo->dev_priv;
-
-	/* Power down before changing mode */
-	ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
-
-	ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi);
-	ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo);
-	ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo);
-	ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco);
-	ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div);
-	ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2);
-	ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable);
-	ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down);
-	ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management);
-}
-
 static void ch7017_destroy(struct intel_dvo_device *dvo)
 {
 	struct ch7017_priv *priv = dvo->dev_priv;
@@ -451,7 +409,5 @@
 	.mode_set = ch7017_mode_set,
 	.dpms = ch7017_dpms,
 	.dump_regs = ch7017_dump_regs,
-	.save = ch7017_save,
-	.restore = ch7017_restore,
 	.destroy = ch7017_destroy,
 };
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d56ff5c..6f1944b 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -92,21 +92,10 @@
 	{ CH7301_VID, "CH7301" },
 };
 
-struct ch7xxx_reg_state {
-    uint8_t regs[CH7xxx_NUM_REGS];
-};
-
 struct ch7xxx_priv {
 	bool quiet;
-
-	struct ch7xxx_reg_state save_reg;
-	struct ch7xxx_reg_state mode_reg;
-	uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT;
-	uint8_t save_TLPF, save_TCT, save_PM, save_IDF;
 };
 
-static void ch7xxx_save(struct intel_dvo_device *dvo);
-
 static char *ch7xxx_get_id(uint8_t vid)
 {
 	int i;
@@ -312,42 +301,17 @@
 
 static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
 {
-	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
 	int i;
 
 	for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+		uint8_t val;
 		if ((i % 8) == 0 )
 			DRM_LOG_KMS("\n %02X: ", i);
-		DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
+		ch7xxx_readb(dvo, i, &val);
+		DRM_LOG_KMS("%02X ", val);
 	}
 }
 
-static void ch7xxx_save(struct intel_dvo_device *dvo)
-{
-	struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
-
-	ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL);
-	ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP);
-	ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD);
-	ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT);
-	ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF);
-	ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM);
-	ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF);
-}
-
-static void ch7xxx_restore(struct intel_dvo_device *dvo)
-{
-	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
-
-	ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL);
-	ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP);
-	ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD);
-	ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT);
-	ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF);
-	ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF);
-	ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM);
-}
-
 static void ch7xxx_destroy(struct intel_dvo_device *dvo)
 {
 	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
@@ -365,7 +329,5 @@
 	.mode_set = ch7xxx_mode_set,
 	.dpms = ch7xxx_dpms,
 	.dump_regs = ch7xxx_dump_regs,
-	.save = ch7xxx_save,
-	.restore = ch7xxx_restore,
 	.destroy = ch7xxx_destroy,
 };
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 24169e5..a2ec3f4 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -153,9 +153,6 @@
 	bool quiet;
 
 	uint16_t width, height;
-
-	uint16_t save_VR01;
-	uint16_t save_VR40;
 };
 
 
@@ -405,22 +402,6 @@
 	DRM_LOG_KMS("VR8F: 0x%04x\n", val);
 }
 
-static void ivch_save(struct intel_dvo_device *dvo)
-{
-	struct ivch_priv *priv = dvo->dev_priv;
-
-	ivch_read(dvo, VR01, &priv->save_VR01);
-	ivch_read(dvo, VR40, &priv->save_VR40);
-}
-
-static void ivch_restore(struct intel_dvo_device *dvo)
-{
-	struct ivch_priv *priv = dvo->dev_priv;
-
-	ivch_write(dvo, VR01, priv->save_VR01);
-	ivch_write(dvo, VR40, priv->save_VR40);
-}
-
 static void ivch_destroy(struct intel_dvo_device *dvo)
 {
 	struct ivch_priv *priv = dvo->dev_priv;
@@ -434,8 +415,6 @@
 struct intel_dvo_dev_ops ivch_ops= {
 	.init = ivch_init,
 	.dpms = ivch_dpms,
-	.save = ivch_save,
-	.restore = ivch_restore,
 	.mode_valid = ivch_mode_valid,
 	.mode_set = ivch_mode_set,
 	.detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 0001c13..9b8e676 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -58,17 +58,9 @@
 
 #define SIL164_REGC 0x0c
 
-struct sil164_save_rec {
-	uint8_t reg8;
-	uint8_t reg9;
-	uint8_t regc;
-};
-
 struct sil164_priv {
 	//I2CDevRec d;
 	bool quiet;
-	struct sil164_save_rec save_regs;
-	struct sil164_save_rec mode_regs;
 };
 
 #define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
@@ -252,34 +244,6 @@
 	DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
 }
 
-static void sil164_save(struct intel_dvo_device *dvo)
-{
-	struct sil164_priv *sil= dvo->dev_priv;
-
-	if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8))
-		return;
-
-	if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9))
-		return;
-
-	if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc))
-		return;
-
-	return;
-}
-
-static void sil164_restore(struct intel_dvo_device *dvo)
-{
-	struct sil164_priv *sil = dvo->dev_priv;
-
-	/* Restore it powered down initially */
-	sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1);
-
-	sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9);
-	sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc);
-	sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8);
-}
-
 static void sil164_destroy(struct intel_dvo_device *dvo)
 {
 	struct sil164_priv *sil = dvo->dev_priv;
@@ -297,7 +261,5 @@
 	.mode_set = sil164_mode_set,
 	.dpms = sil164_dpms,
 	.dump_regs = sil164_dump_regs,
-	.save = sil164_save,
-	.restore = sil164_restore,
 	.destroy = sil164_destroy,
 };
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index c7c391bc..66c697b 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -86,16 +86,8 @@
 #define TFP410_V_RES_LO		0x3C
 #define TFP410_V_RES_HI		0x3D
 
-struct tfp410_save_rec {
-	uint8_t ctl1;
-	uint8_t ctl2;
-};
-
 struct tfp410_priv {
 	bool quiet;
-
-	struct tfp410_save_rec saved_reg;
-	struct tfp410_save_rec mode_reg;
 };
 
 static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
@@ -293,28 +285,6 @@
 	DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
 }
 
-static void tfp410_save(struct intel_dvo_device *dvo)
-{
-	struct tfp410_priv *tfp = dvo->dev_priv;
-
-	if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1))
-		return;
-
-	if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2))
-		return;
-}
-
-static void tfp410_restore(struct intel_dvo_device *dvo)
-{
-	struct tfp410_priv *tfp = dvo->dev_priv;
-
-	/* Restore it powered down initially */
-	tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1);
-
-	tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2);
-	tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1);
-}
-
 static void tfp410_destroy(struct intel_dvo_device *dvo)
 {
 	struct tfp410_priv *tfp = dvo->dev_priv;
@@ -332,7 +302,5 @@
 	.mode_set = tfp410_mode_set,
 	.dpms = tfp410_dpms,
 	.dump_regs = tfp410_dump_regs,
-	.save = tfp410_save,
-	.restore = tfp410_restore,
 	.destroy = tfp410_destroy,
 };
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a0b8447..322070c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,19 +96,18 @@
 		spin_lock(lock);
 	list_for_each_entry(obj_priv, head, list)
 	{
-		struct drm_gem_object *obj = obj_priv->obj;
-
 		seq_printf(m, "    %p: %s %8zd %08x %08x %d%s%s",
-			   obj,
+			   &obj_priv->base,
 			   get_pin_flag(obj_priv),
-			   obj->size,
-			   obj->read_domains, obj->write_domain,
+			   obj_priv->base.size,
+			   obj_priv->base.read_domains,
+			   obj_priv->base.write_domain,
 			   obj_priv->last_rendering_seqno,
 			   obj_priv->dirty ? " dirty" : "",
 			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 
-		if (obj->name)
-			seq_printf(m, " (name: %d)", obj->name);
+		if (obj_priv->base.name)
+			seq_printf(m, " (name: %d)", obj_priv->base.name);
 		if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
 			seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
 		if (obj_priv->gtt_space != NULL)
@@ -289,7 +288,7 @@
 	spin_lock(&dev_priv->mm.active_list_lock);
 
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-		obj = obj_priv->obj;
+		obj = &obj_priv->base;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
 		    ret = i915_gem_object_get_pages(obj, 0);
 		    if (ret) {
@@ -567,23 +566,14 @@
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_crtc *crtc;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	bool fbc_enabled = false;
 
-	if (!dev_priv->display.fbc_enabled) {
+	if (!I915_HAS_FBC(dev)) {
 		seq_printf(m, "FBC unsupported on this chipset\n");
 		return 0;
 	}
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		if (!crtc->enabled)
-			continue;
-		if (dev_priv->display.fbc_enabled(crtc))
-			fbc_enabled = true;
-	}
-
-	if (fbc_enabled) {
+	if (intel_fbc_enabled(dev)) {
 		seq_printf(m, "FBC enabled\n");
 	} else {
 		seq_printf(m, "FBC disabled: ");
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c3cfafc..2a6b5de 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1357,13 +1357,12 @@
 
 	dev_priv->cfb_size = size;
 
+	intel_disable_fbc(dev);
 	dev_priv->compressed_fb = compressed_fb;
 
 	if (IS_GM45(dev)) {
-		g4x_disable_fbc(dev);
 		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
 	} else {
-		i8xx_disable_fbc(dev);
 		I915_WRITE(FBC_CFB_BASE, cfb_base);
 		I915_WRITE(FBC_LL_BASE, ll_base);
 		dev_priv->compressed_llb = compressed_llb;
@@ -1504,8 +1503,8 @@
 
 	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
 
-	drm_helper_initial_config(dev);
-
+	intel_fbdev_init(dev);
+	drm_kms_helper_poll_init(dev);
 	return 0;
 
 destroy_ringbuffer:
@@ -1591,7 +1590,7 @@
  */
 int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv;
 	resource_size_t base, size;
 	int ret = 0, mmio_bar;
 	uint32_t agp_size, prealloc_size, prealloc_start;
@@ -1723,6 +1722,8 @@
 	/* Start out suspended */
 	dev_priv->mm.suspended = 1;
 
+	intel_detect_pch(dev);
+
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		ret = i915_load_modeset_init(dev, prealloc_start,
 					     prealloc_size, agp_size);
@@ -1769,6 +1770,8 @@
 	}
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		intel_modeset_cleanup(dev);
+
 		/*
 		 * free the memory space allocated for the child device
 		 * config parsed from VBT
@@ -1792,8 +1795,6 @@
 	intel_opregion_free(dev, 0);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		intel_modeset_cleanup(dev);
-
 		i915_gem_free_all_phys_object(dev);
 
 		mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cc03537..5c51e45 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -188,6 +188,35 @@
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
+#define INTEL_PCH_DEVICE_ID_MASK	0xff00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
+
+void intel_detect_pch (struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct pci_dev *pch;
+
+	/*
+	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
+	 * make graphics device passthrough easy for a VMM, which only
+	 * needs to expose the ISA bridge to let the driver know the real
+	 * hardware underneath. This is a requirement from the virtualization team.
+	 */
+	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+	if (pch) {
+		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+			int id;
+			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+			if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_CPT;
+				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+			}
+		}
+		pci_dev_put(pch);
+	}
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6e47900..7f797ef 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -128,6 +128,7 @@
 
 struct drm_i915_fence_reg {
 	struct drm_gem_object *obj;
+	struct list_head lru_list;
 };
 
 struct sdvo_device_mapping {
@@ -135,6 +136,7 @@
 	u8 slave_addr;
 	u8 dvo_wiring;
 	u8 initialized;
+	u8 ddc_pin;
 };
 
 struct drm_i915_error_state {
@@ -175,7 +177,7 @@
 
 struct drm_i915_display_funcs {
 	void (*dpms)(struct drm_crtc *crtc, int mode);
-	bool (*fbc_enabled)(struct drm_crtc *crtc);
+	bool (*fbc_enabled)(struct drm_device *dev);
 	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
 	void (*disable_fbc)(struct drm_device *dev);
 	int (*get_display_clock_speed)(struct drm_device *dev);
@@ -222,6 +224,13 @@
 	FBC_NOT_TILED, /* buffer not tiled */
 };
 
+enum intel_pch {
+	PCH_IBX,	/* Ibexpeak PCH */
+	PCH_CPT,	/* Cougarpoint PCH */
+};
+
+struct intel_fbdev;
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 
@@ -335,6 +344,9 @@
 	/* Display functions */
 	struct drm_i915_display_funcs display;
 
+	/* PCH chipset type */
+	enum intel_pch pch_type;
+
 	/* Register state */
 	bool modeset_on_lid;
 	u8 saveLBB;
@@ -637,11 +649,14 @@
 
 	struct drm_mm_node *compressed_fb;
 	struct drm_mm_node *compressed_llb;
+
+	/* fbdev registered on this device */
+	struct intel_fbdev *fbdev;
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
 struct drm_i915_gem_object {
-	struct drm_gem_object *obj;
+	struct drm_gem_object base;
 
 	/** Current space allocated to this object in the GTT, if any. */
 	struct drm_mm_node *gtt_space;
@@ -651,9 +666,6 @@
 	/** This object's place on GPU write list */
 	struct list_head gpu_write_list;
 
-	/** This object's place on the fenced object LRU */
-	struct list_head fence_list;
-
 	/**
 	 * This is set if the object is on the active or flushing lists
 	 * (has pending rendering), and is not set if it's on inactive (ready
@@ -740,7 +752,7 @@
 	atomic_t pending_flip;
 };
 
-#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
 /**
  * Request queue structure.
@@ -902,6 +914,8 @@
 				struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+					      size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
 int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
 void i915_gem_object_unpin(struct drm_gem_object *obj);
@@ -998,6 +1012,12 @@
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void i8xx_disable_fbc(struct drm_device *dev);
 extern void g4x_disable_fbc(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern bool intel_fbc_enabled(struct drm_device *dev);
+
+extern void intel_detect_pch (struct drm_device *dev);
+extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
 /**
  * Lock test for when it's just for synchronization of ring access.
@@ -1130,7 +1150,8 @@
 #define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
 #define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
 #define SUPPORTS_TV(dev)		(IS_I9XX(dev) && IS_MOBILE(dev) && \
-					!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
+					!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
+					!IS_GEN6(dev))
 #define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
 /* dsparb controlled by hw only */
 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
@@ -1144,6 +1165,9 @@
 			    IS_GEN6(dev))
 #define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
 
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ef3d91d..112699f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -124,7 +124,7 @@
 	args->size = roundup(args->size, PAGE_SIZE);
 
 	/* Allocate the new object */
-	obj = drm_gem_object_alloc(dev, args->size);
+	obj = i915_gem_alloc_object(dev, args->size);
 	if (obj == NULL)
 		return -ENOMEM;
 
@@ -1051,7 +1051,9 @@
 		 * about to occur.
 		 */
 		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-			list_move_tail(&obj_priv->fence_list,
+			struct drm_i915_fence_reg *reg =
+				&dev_priv->fence_regs[obj_priv->fence_reg];
+			list_move_tail(&reg->lru_list,
 				       &dev_priv->mm.fence_list);
 		}
 
@@ -1566,7 +1568,7 @@
 	list_for_each_entry_safe(obj_priv, next,
 				 &dev_priv->mm.gpu_write_list,
 				 gpu_write_list) {
-		struct drm_gem_object *obj = obj_priv->obj;
+		struct drm_gem_object *obj = &obj_priv->base;
 
 		if ((obj->write_domain & flush_domains) ==
 		    obj->write_domain) {
@@ -1577,9 +1579,12 @@
 			i915_gem_object_move_to_active(obj, seqno);
 
 			/* update the fence lru list */
-			if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-				list_move_tail(&obj_priv->fence_list,
+			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+				struct drm_i915_fence_reg *reg =
+					&dev_priv->fence_regs[obj_priv->fence_reg];
+				list_move_tail(&reg->lru_list,
 						&dev_priv->mm.fence_list);
+			}
 
 			trace_i915_gem_object_change_domain(obj,
 							    obj->read_domains,
@@ -1745,7 +1750,7 @@
 		obj_priv = list_first_entry(&dev_priv->mm.active_list,
 					    struct drm_i915_gem_object,
 					    list);
-		obj = obj_priv->obj;
+		obj = &obj_priv->base;
 
 		/* If the seqno being retired doesn't match the oldest in the
 		 * list, then the oldest in the list must still be newer than
@@ -2119,7 +2124,7 @@
 
 	/* Try to find the smallest clean object */
 	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		struct drm_gem_object *obj = obj_priv->obj;
+		struct drm_gem_object *obj = &obj_priv->base;
 		if (obj->size >= min_size) {
 			if ((!obj_priv->dirty ||
 			     i915_gem_object_is_purgeable(obj_priv)) &&
@@ -2253,7 +2258,7 @@
 
 			/* Find an object that we can immediately reuse */
 			list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
-				obj = obj_priv->obj;
+				obj = &obj_priv->base;
 				if (obj->size >= min_size)
 					break;
 
@@ -2485,9 +2490,10 @@
 
 	/* None available, try to steal one or wait for a user to finish */
 	i = I915_FENCE_REG_NONE;
-	list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
-			    fence_list) {
-		obj = obj_priv->obj;
+	list_for_each_entry(reg, &dev_priv->mm.fence_list,
+			    lru_list) {
+		obj = reg->obj;
+		obj_priv = to_intel_bo(obj);
 
 		if (obj_priv->pin_count)
 			continue;
@@ -2536,7 +2542,8 @@
 
 	/* Just update our place in the LRU if our fence is getting used. */
 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-		list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+		reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
 		return 0;
 	}
 
@@ -2566,7 +2573,7 @@
 
 	obj_priv->fence_reg = ret;
 	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
-	list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+	list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
 
 	reg->obj = obj;
 
@@ -2598,6 +2605,8 @@
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct drm_i915_fence_reg *reg =
+		&dev_priv->fence_regs[obj_priv->fence_reg];
 
 	if (IS_GEN6(dev)) {
 		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2616,9 +2625,9 @@
 		I915_WRITE(fence_reg, 0);
 	}
 
-	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
+	reg->obj = NULL;
 	obj_priv->fence_reg = I915_FENCE_REG_NONE;
-	list_del_init(&obj_priv->fence_list);
+	list_del_init(&reg->lru_list);
 }
 
 /**
@@ -4471,34 +4480,38 @@
 	return 0;
 }
 
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+					      size_t size)
+{
+	struct drm_i915_gem_object *obj;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (obj == NULL)
+		return NULL;
+
+	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+		kfree(obj);
+		return NULL;
+	}
+
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+
+	obj->agp_type = AGP_USER_MEMORY;
+	obj->base.driver_private = NULL;
+	obj->fence_reg = I915_FENCE_REG_NONE;
+	INIT_LIST_HEAD(&obj->list);
+	INIT_LIST_HEAD(&obj->gpu_write_list);
+	obj->madv = I915_MADV_WILLNEED;
+
+	trace_i915_gem_object_create(&obj->base);
+
+	return &obj->base;
+}
+
 int i915_gem_init_object(struct drm_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv;
-
-	obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
-	if (obj_priv == NULL)
-		return -ENOMEM;
-
-	/*
-	 * We've just allocated pages from the kernel,
-	 * so they've just been written by the CPU with
-	 * zeros. They'll need to be clflushed before we
-	 * use them with the GPU.
-	 */
-	obj->write_domain = I915_GEM_DOMAIN_CPU;
-	obj->read_domains = I915_GEM_DOMAIN_CPU;
-
-	obj_priv->agp_type = AGP_USER_MEMORY;
-
-	obj->driver_private = obj_priv;
-	obj_priv->obj = obj;
-	obj_priv->fence_reg = I915_FENCE_REG_NONE;
-	INIT_LIST_HEAD(&obj_priv->list);
-	INIT_LIST_HEAD(&obj_priv->gpu_write_list);
-	INIT_LIST_HEAD(&obj_priv->fence_list);
-	obj_priv->madv = I915_MADV_WILLNEED;
-
-	trace_i915_gem_object_create(obj);
+	BUG();
 
 	return 0;
 }
@@ -4521,9 +4534,11 @@
 	if (obj_priv->mmap_offset)
 		i915_gem_free_mmap_offset(obj);
 
+	drm_gem_object_release(obj);
+
 	kfree(obj_priv->page_cpu_valid);
 	kfree(obj_priv->bit_17);
-	kfree(obj->driver_private);
+	kfree(obj_priv);
 }
 
 /** Unbinds all inactive objects. */
@@ -4536,9 +4551,9 @@
 		struct drm_gem_object *obj;
 		int ret;
 
-		obj = list_first_entry(&dev_priv->mm.inactive_list,
-				       struct drm_i915_gem_object,
-				       list)->obj;
+		obj = &list_first_entry(&dev_priv->mm.inactive_list,
+					struct drm_i915_gem_object,
+					list)->base;
 
 		ret = i915_gem_object_unbind(obj);
 		if (ret != 0) {
@@ -4608,7 +4623,7 @@
 	struct drm_i915_gem_object *obj_priv;
 	int ret;
 
-	obj = drm_gem_object_alloc(dev, 4096);
+	obj = i915_gem_alloc_object(dev, 4096);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate seqno page\n");
 		ret = -ENOMEM;
@@ -4653,7 +4668,7 @@
 	if (!I915_NEED_GFX_HWS(dev))
 		return 0;
 
-	obj = drm_gem_object_alloc(dev, 4096);
+	obj = i915_gem_alloc_object(dev, 4096);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate status page\n");
 		ret = -ENOMEM;
@@ -4764,7 +4779,7 @@
 	if (ret != 0)
 		return ret;
 
-	obj = drm_gem_object_alloc(dev, 128 * 1024);
+	obj = i915_gem_alloc_object(dev, 128 * 1024);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate ringbuffer\n");
 		i915_gem_cleanup_hws(dev);
@@ -4957,6 +4972,8 @@
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+	for (i = 0; i < 16; i++)
+		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
 	dev_priv->mm.next_gem_seqno = 1;
@@ -5185,6 +5202,20 @@
 }
 
 static int
+i915_gpu_is_active(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int lists_empty;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+		      list_empty(&dev_priv->mm.active_list);
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
+	return !lists_empty;
+}
+
+static int
 i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 {
 	drm_i915_private_t *dev_priv, *next_dev;
@@ -5213,6 +5244,7 @@
 
 	spin_lock(&shrink_list_lock);
 
+rescan:
 	/* first scan for clean buffers */
 	list_for_each_entry_safe(dev_priv, next_dev,
 				 &shrink_list, mm.shrink_list) {
@@ -5229,7 +5261,7 @@
 					 &dev_priv->mm.inactive_list,
 					 list) {
 			if (i915_gem_object_is_purgeable(obj_priv)) {
-				i915_gem_object_unbind(obj_priv->obj);
+				i915_gem_object_unbind(&obj_priv->base);
 				if (--nr_to_scan <= 0)
 					break;
 			}
@@ -5258,7 +5290,7 @@
 					 &dev_priv->mm.inactive_list,
 					 list) {
 			if (nr_to_scan > 0) {
-				i915_gem_object_unbind(obj_priv->obj);
+				i915_gem_object_unbind(&obj_priv->base);
 				nr_to_scan--;
 			} else
 				cnt++;
@@ -5270,6 +5302,36 @@
 		would_deadlock = 0;
 	}
 
+	if (nr_to_scan) {
+		int active = 0;
+
+		/*
+		 * We are desperate for pages, so as a last resort, wait
+		 * for the GPU to finish and discard whatever we can.
+		 * This dramatically reduces the number of OOM-killer
+		 * events whilst running the GPU aggressively.
+		 */
+		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+			struct drm_device *dev = dev_priv->dev;
+
+			if (!mutex_trylock(&dev->struct_mutex))
+				continue;
+
+			spin_unlock(&shrink_list_lock);
+
+			if (i915_gpu_is_active(dev)) {
+				i915_gpu_idle(dev);
+				active++;
+			}
+
+			spin_lock(&shrink_list_lock);
+			mutex_unlock(&dev->struct_mutex);
+		}
+
+		if (active)
+			goto rescan;
+	}
+
 	spin_unlock(&shrink_list_lock);
 
 	if (would_deadlock)
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 35507cf..80f380b 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -39,7 +39,7 @@
 	struct drm_i915_gem_object *obj_priv;
 
 	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		obj = obj_priv->obj;
+		obj = &obj_priv->base;
 		if (obj_priv->pin_count || obj_priv->active ||
 		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
 					   I915_GEM_DOMAIN_GTT)))
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4bdccef..4b7c49d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -283,6 +283,11 @@
 		return -EINVAL;
 	}
 
+	if (obj_priv->pin_count) {
+		drm_gem_object_unreference_unlocked(obj);
+		return -EBUSY;
+	}
+
 	if (args->tiling_mode == I915_TILING_NONE) {
 		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
 		args->stride = 0;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index df6a9cd..8c3f080 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -169,9 +169,13 @@
 
 	if (HAS_PCH_SPLIT(dev))
 		ironlake_enable_display_irq(dev_priv, DE_GSE);
-	else
+	else {
 		i915_enable_pipestat(dev_priv, 1,
 				     I915_LEGACY_BLC_EVENT_ENABLE);
+		if (IS_I965G(dev))
+			i915_enable_pipestat(dev_priv, 0,
+					     I915_LEGACY_BLC_EVENT_ENABLE);
+	}
 }
 
 /**
@@ -256,18 +260,18 @@
 						    hotplug_work);
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_connector *connector;
+	struct drm_encoder *encoder;
 
-	if (mode_config->num_connector) {
-		list_for_each_entry(connector, &mode_config->connector_list, head) {
-			struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	if (mode_config->num_encoder) {
+		list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+			struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	
 			if (intel_encoder->hot_plug)
 				(*intel_encoder->hot_plug) (intel_encoder);
 		}
 	}
 	/* Just fire off a uevent and let userspace tell us what to do */
-	drm_sysfs_hotplug_event(dev);
+	drm_helper_hpd_irq_event(dev);
 }
 
 static void i915_handle_rps_change(struct drm_device *dev)
@@ -612,7 +616,7 @@
 	batchbuffer[1] = NULL;
 	count = 0;
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-		struct drm_gem_object *obj = obj_priv->obj;
+		struct drm_gem_object *obj = &obj_priv->base;
 
 		if (batchbuffer[0] == NULL &&
 		    bbaddr >= obj_priv->gtt_offset &&
@@ -648,7 +652,7 @@
 	if (error->active_bo) {
 		int i = 0;
 		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-			struct drm_gem_object *obj = obj_priv->obj;
+			struct drm_gem_object *obj = &obj_priv->base;
 
 			error->active_bo[i].size = obj->size;
 			error->active_bo[i].name = obj->name;
@@ -950,7 +954,8 @@
 			intel_finish_page_flip(dev, 1);
 		}
 
-		if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+		if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+		    (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
 		    (iir & I915_ASLE_INTERRUPT))
 			opregion_asle_intr(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4cbc521..f3e39cc 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1764,6 +1764,14 @@
 #define   DP_LINK_TRAIN_MASK		(3 << 28)
 #define   DP_LINK_TRAIN_SHIFT		28
 
+/* CPT Link training mode */
+#define   DP_LINK_TRAIN_PAT_1_CPT	(0 << 8)
+#define   DP_LINK_TRAIN_PAT_2_CPT	(1 << 8)
+#define   DP_LINK_TRAIN_PAT_IDLE_CPT	(2 << 8)
+#define   DP_LINK_TRAIN_OFF_CPT		(3 << 8)
+#define   DP_LINK_TRAIN_MASK_CPT	(7 << 8)
+#define   DP_LINK_TRAIN_SHIFT_CPT	8
+
 /* Signal voltages. These are mostly controlled by the other end */
 #define   DP_VOLTAGE_0_4		(0 << 25)
 #define   DP_VOLTAGE_0_6		(1 << 25)
@@ -1924,7 +1932,10 @@
 /* Display & cursor control */
 
 /* dithering flag on Ironlake */
-#define PIPE_ENABLE_DITHER	(1 << 4)
+#define PIPE_ENABLE_DITHER		(1 << 4)
+#define PIPE_DITHER_TYPE_MASK		(3 << 2)
+#define PIPE_DITHER_TYPE_SPATIAL	(0 << 2)
+#define PIPE_DITHER_TYPE_ST01		(1 << 2)
 /* Pipe A */
 #define PIPEADSL		0x70000
 #define PIPEACONF		0x70008
@@ -1988,15 +1999,24 @@
 
 #define DSPFW1			0x70034
 #define   DSPFW_SR_SHIFT	23
+#define   DSPFW_SR_MASK 	(0x1ff<<23)
 #define   DSPFW_CURSORB_SHIFT	16
+#define   DSPFW_CURSORB_MASK	(0x3f<<16)
 #define   DSPFW_PLANEB_SHIFT	8
+#define   DSPFW_PLANEB_MASK	(0x7f<<8)
+#define   DSPFW_PLANEA_MASK	(0x7f)
 #define DSPFW2			0x70038
 #define   DSPFW_CURSORA_MASK	0x00003f00
 #define   DSPFW_CURSORA_SHIFT	8
+#define   DSPFW_PLANEC_MASK	(0x7f)
 #define DSPFW3			0x7003c
 #define   DSPFW_HPLL_SR_EN	(1<<31)
 #define   DSPFW_CURSOR_SR_SHIFT	24
 #define   PINEVIEW_SELF_REFRESH_EN	(1<<30)
+#define   DSPFW_CURSOR_SR_MASK		(0x3f<<24)
+#define   DSPFW_HPLL_CURSOR_SHIFT	16
+#define   DSPFW_HPLL_CURSOR_MASK	(0x3f<<16)
+#define   DSPFW_HPLL_SR_MASK		(0x1ff)
 
 /* FIFO watermark sizes etc */
 #define G4X_FIFO_LINE_SIZE	64
@@ -2023,6 +2043,43 @@
 #define PINEVIEW_CURSOR_DFT_WM	0
 #define PINEVIEW_CURSOR_GUARD_WM	5
 
+
+/* define the Watermark register on Ironlake */
+#define WM0_PIPEA_ILK		0x45100
+#define  WM0_PIPE_PLANE_MASK	(0x7f<<16)
+#define  WM0_PIPE_PLANE_SHIFT	16
+#define  WM0_PIPE_SPRITE_MASK	(0x3f<<8)
+#define  WM0_PIPE_SPRITE_SHIFT	8
+#define  WM0_PIPE_CURSOR_MASK	(0x1f)
+
+#define WM0_PIPEB_ILK		0x45104
+#define WM1_LP_ILK		0x45108
+#define  WM1_LP_SR_EN		(1<<31)
+#define  WM1_LP_LATENCY_SHIFT	24
+#define  WM1_LP_LATENCY_MASK	(0x7f<<24)
+#define  WM1_LP_SR_MASK		(0x1ff<<8)
+#define  WM1_LP_SR_SHIFT	8
+#define  WM1_LP_CURSOR_MASK	(0x3f)
+
+/* Memory latency timer register */
+#define MLTR_ILK		0x11222
+/* the unit of memory self-refresh latency time is 0.5us */
+#define  ILK_SRLT_MASK		0x3f
+
+/* define the fifo size on Ironlake */
+#define ILK_DISPLAY_FIFO	128
+#define ILK_DISPLAY_MAXWM	64
+#define ILK_DISPLAY_DFTWM	8
+
+#define ILK_DISPLAY_SR_FIFO	512
+#define ILK_DISPLAY_MAX_SRWM	0x1ff
+#define ILK_DISPLAY_DFT_SRWM	0x3f
+#define ILK_CURSOR_SR_FIFO	64
+#define ILK_CURSOR_MAX_SRWM	0x3f
+#define ILK_CURSOR_DFT_SRWM	8
+
+#define ILK_FIFO_LINE_SIZE	64
+
 /*
  * The two pipe frame counter registers are not synchronized, so
  * reading a stable value is somewhat tricky. The following code
@@ -2304,8 +2361,15 @@
 #define GTIIR   0x44018
 #define GTIER   0x4401c
 
+#define ILK_DISPLAY_CHICKEN2	0x42004
+#define  ILK_DPARB_GATE	(1<<22)
+#define  ILK_VSDPFD_FULL	(1<<21)
+#define ILK_DSPCLK_GATE		0x42020
+#define  ILK_DPARB_CLK_GATE	(1<<5)
+
 #define DISP_ARB_CTL	0x45000
 #define  DISP_TILE_SURFACE_SWIZZLING	(1<<13)
+#define  DISP_FBC_WM_DIS		(1<<15)
 
 /* PCH */
 
@@ -2316,6 +2380,11 @@
 #define SDE_PORTB_HOTPLUG       (1 << 8)
 #define SDE_SDVOB_HOTPLUG       (1 << 6)
 #define SDE_HOTPLUG_MASK	(0xf << 8)
+/* CPT */
+#define SDE_CRT_HOTPLUG_CPT	(1 << 19)
+#define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT	(1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT	(1 << 21)
 
 #define SDEISR  0xc4000
 #define SDEIMR  0xc4004
@@ -2407,6 +2476,17 @@
 #define PCH_SSC4_PARMS          0xc6210
 #define PCH_SSC4_AUX_PARMS      0xc6214
 
+#define PCH_DPLL_SEL		0xc7000
+#define  TRANSA_DPLL_ENABLE	(1<<3)
+#define	 TRANSA_DPLLB_SEL	(1<<0)
+#define	 TRANSA_DPLLA_SEL	0
+#define  TRANSB_DPLL_ENABLE	(1<<7)
+#define	 TRANSB_DPLLB_SEL	(1<<4)
+#define	 TRANSB_DPLLA_SEL	(0)
+#define  TRANSC_DPLL_ENABLE	(1<<11)
+#define	 TRANSC_DPLLB_SEL	(1<<8)
+#define	 TRANSC_DPLLA_SEL	(0)
+
 /* transcoder */
 
 #define TRANS_HTOTAL_A          0xe0000
@@ -2493,6 +2573,19 @@
 #define  FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
 #define  FDI_LINK_TRAIN_PRE_EMPHASIS_2X   (2<<22)
 #define  FDI_LINK_TRAIN_PRE_EMPHASIS_3X   (3<<22)
+/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
+   SNB has different settings. */
+/* SNB A-stepping */
+#define  FDI_LINK_TRAIN_400MV_0DB_SNB_A		(0x38<<22)
+#define  FDI_LINK_TRAIN_400MV_6DB_SNB_A		(0x02<<22)
+#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_A	(0x01<<22)
+#define  FDI_LINK_TRAIN_800MV_0DB_SNB_A		(0x0<<22)
+/* SNB B-stepping */
+#define  FDI_LINK_TRAIN_400MV_0DB_SNB_B		(0x0<<22)
+#define  FDI_LINK_TRAIN_400MV_6DB_SNB_B		(0x3a<<22)
+#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_B	(0x39<<22)
+#define  FDI_LINK_TRAIN_800MV_0DB_SNB_B		(0x38<<22)
+#define  FDI_LINK_TRAIN_VOL_EMP_MASK		(0x3f<<22)
 #define  FDI_DP_PORT_WIDTH_X1           (0<<19)
 #define  FDI_DP_PORT_WIDTH_X2           (1<<19)
 #define  FDI_DP_PORT_WIDTH_X3           (2<<19)
@@ -2525,6 +2618,13 @@
 #define  FDI_RX_ENHANCE_FRAME_ENABLE    (1<<6)
 #define  FDI_SEL_RAWCLK                 (0<<4)
 #define  FDI_SEL_PCDCLK                 (1<<4)
+/* CPT */
+#define  FDI_AUTO_TRAINING			(1<<10)
+#define  FDI_LINK_TRAIN_PATTERN_1_CPT		(0<<8)
+#define  FDI_LINK_TRAIN_PATTERN_2_CPT		(1<<8)
+#define  FDI_LINK_TRAIN_PATTERN_IDLE_CPT	(2<<8)
+#define  FDI_LINK_TRAIN_NORMAL_CPT		(3<<8)
+#define  FDI_LINK_TRAIN_PATTERN_MASK_CPT	(3<<8)
 
 #define FDI_RXA_MISC            0xf0010
 #define FDI_RXB_MISC            0xf1010
@@ -2596,6 +2696,9 @@
 #define  HSYNC_ACTIVE_HIGH      (1 << 3)
 #define  PORT_DETECTED          (1 << 2)
 
+/* PCH SDVOB multiplex with HDMIB */
+#define PCH_SDVOB	HDMIB
+
 #define HDMIC   0xe1150
 #define HDMID   0xe1160
 
@@ -2653,4 +2756,42 @@
 #define PCH_DPD_AUX_CH_DATA4	0xe4320
 #define PCH_DPD_AUX_CH_DATA5	0xe4324
 
+/* CPT */
+#define  PORT_TRANS_A_SEL_CPT	0
+#define  PORT_TRANS_B_SEL_CPT	(1<<29)
+#define  PORT_TRANS_C_SEL_CPT	(2<<29)
+#define  PORT_TRANS_SEL_MASK	(3<<29)
+
+#define TRANS_DP_CTL_A		0xe0300
+#define TRANS_DP_CTL_B		0xe1300
+#define TRANS_DP_CTL_C		0xe2300
+#define  TRANS_DP_OUTPUT_ENABLE	(1<<31)
+#define  TRANS_DP_PORT_SEL_B	(0<<29)
+#define  TRANS_DP_PORT_SEL_C	(1<<29)
+#define  TRANS_DP_PORT_SEL_D	(2<<29)
+#define  TRANS_DP_PORT_SEL_MASK	(3<<29)
+#define  TRANS_DP_AUDIO_ONLY	(1<<26)
+#define  TRANS_DP_ENH_FRAMING	(1<<18)
+#define  TRANS_DP_8BPC		(0<<9)
+#define  TRANS_DP_10BPC		(1<<9)
+#define  TRANS_DP_6BPC		(2<<9)
+#define  TRANS_DP_12BPC		(3<<9)
+#define  TRANS_DP_VSYNC_ACTIVE_HIGH	(1<<4)
+#define  TRANS_DP_VSYNC_ACTIVE_LOW	0
+#define  TRANS_DP_HSYNC_ACTIVE_HIGH	(1<<3)
+#define  TRANS_DP_HSYNC_ACTIVE_LOW	0
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define  EDP_LINK_TRAIN_400MV_0DB_SNB_A		(0x38<<22)
+#define  EDP_LINK_TRAIN_400MV_6DB_SNB_A		(0x02<<22)
+#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_A	(0x01<<22)
+#define  EDP_LINK_TRAIN_800MV_0DB_SNB_A		(0x0<<22)
+/* SNB B-stepping */
+#define  EDP_LINK_TRAIN_400MV_0DB_SNB_B		(0x0<<22)
+#define  EDP_LINK_TRAIN_400MV_6DB_SNB_B		(0x3a<<22)
+#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_B	(0x39<<22)
+#define  EDP_LINK_TRAIN_800MV_0DB_SNB_B		(0x38<<22)
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB	(0x3f<<22)
+
 #endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index ac0d1a7..60a5800 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -600,14 +600,16 @@
 	}
 	/* FIXME: save TV & SDVO state */
 
-	/* FBC state */
-	if (IS_GM45(dev)) {
-		dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
-	} else {
-		dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
-		dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
-		dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
-		dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+	/* Only save FBC state on the platform that supports FBC */
+	if (I915_HAS_FBC(dev)) {
+		if (IS_GM45(dev)) {
+			dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+		} else {
+			dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+			dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+			dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+			dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+		}
 	}
 
 	/* VGA state */
@@ -702,18 +704,19 @@
 	}
 	/* FIXME: restore TV & SDVO state */
 
-	/* FBC info */
-	if (IS_GM45(dev)) {
-		g4x_disable_fbc(dev);
-		I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
-	} else {
-		i8xx_disable_fbc(dev);
-		I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
-		I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
-		I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
-		I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+	/* Only restore FBC info on the platform that supports FBC */
+	if (I915_HAS_FBC(dev)) {
+		if (IS_GM45(dev)) {
+			g4x_disable_fbc(dev);
+			I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+		} else {
+			i8xx_disable_fbc(dev);
+			I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+			I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+			I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+			I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+		}
 	}
-
 	/* VGA state */
 	if (IS_IRONLAKE(dev))
 		I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 01840d9..9e4c45f 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -115,7 +115,7 @@
 		      __entry->obj, __entry->fence, __entry->tiling_mode)
 );
 
-TRACE_EVENT(i915_gem_object_unbind,
+DECLARE_EVENT_CLASS(i915_gem_object,
 
 	    TP_PROTO(struct drm_gem_object *obj),
 
@@ -132,21 +132,18 @@
 	    TP_printk("obj=%p", __entry->obj)
 );
 
-TRACE_EVENT(i915_gem_object_destroy,
+DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
 
 	    TP_PROTO(struct drm_gem_object *obj),
 
-	    TP_ARGS(obj),
+	    TP_ARGS(obj)
+);
 
-	    TP_STRUCT__entry(
-			     __field(struct drm_gem_object *, obj)
-			     ),
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 
-	    TP_fast_assign(
-			   __entry->obj = obj;
-			   ),
+	    TP_PROTO(struct drm_gem_object *obj),
 
-	    TP_printk("obj=%p", __entry->obj)
+	    TP_ARGS(obj)
 );
 
 /* batch tracing */
@@ -197,8 +194,7 @@
 		      __entry->flush_domains, __entry->invalidate_domains)
 );
 
-
-TRACE_EVENT(i915_gem_request_complete,
+DECLARE_EVENT_CLASS(i915_gem_request,
 
 	    TP_PROTO(struct drm_device *dev, u32 seqno),
 
@@ -217,64 +213,35 @@
 	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
 );
 
-TRACE_EVENT(i915_gem_request_retire,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
 
 	    TP_PROTO(struct drm_device *dev, u32 seqno),
 
-	    TP_ARGS(dev, seqno),
-
-	    TP_STRUCT__entry(
-			     __field(u32, dev)
-			     __field(u32, seqno)
-			     ),
-
-	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
-			   __entry->seqno = seqno;
-			   ),
-
-	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_ARGS(dev, seqno)
 );
 
-TRACE_EVENT(i915_gem_request_wait_begin,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
 
 	    TP_PROTO(struct drm_device *dev, u32 seqno),
 
-	    TP_ARGS(dev, seqno),
-
-	    TP_STRUCT__entry(
-			     __field(u32, dev)
-			     __field(u32, seqno)
-			     ),
-
-	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
-			   __entry->seqno = seqno;
-			   ),
-
-	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_ARGS(dev, seqno)
 );
 
-TRACE_EVENT(i915_gem_request_wait_end,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
 
 	    TP_PROTO(struct drm_device *dev, u32 seqno),
 
-	    TP_ARGS(dev, seqno),
-
-	    TP_STRUCT__entry(
-			     __field(u32, dev)
-			     __field(u32, seqno)
-			     ),
-
-	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
-			   __entry->seqno = seqno;
-			   ),
-
-	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_ARGS(dev, seqno)
 );
 
-TRACE_EVENT(i915_ring_wait_begin,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+DECLARE_EVENT_CLASS(i915_ring,
 
 	    TP_PROTO(struct drm_device *dev),
 
@@ -291,26 +258,23 @@
 	    TP_printk("dev=%u", __entry->dev)
 );
 
-TRACE_EVENT(i915_ring_wait_end,
+DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
 
 	    TP_PROTO(struct drm_device *dev),
 
-	    TP_ARGS(dev),
+	    TP_ARGS(dev)
+);
 
-	    TP_STRUCT__entry(
-			     __field(u32, dev)
-			     ),
+DEFINE_EVENT(i915_ring, i915_ring_wait_end,
 
-	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
-			   ),
+	    TP_PROTO(struct drm_device *dev),
 
-	    TP_printk("dev=%u", __entry->dev)
+	    TP_ARGS(dev)
 );
 
 #endif /* _I915_TRACE_H_ */
 
 /* This part must be outside protection */
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#define TRACE_INCLUDE_PATH .
 #include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f9ba452..4c748d8 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -366,6 +366,7 @@
 			p_mapping->dvo_port = p_child->dvo_port;
 			p_mapping->slave_addr = p_child->slave_addr;
 			p_mapping->dvo_wiring = p_child->dvo_wiring;
+			p_mapping->ddc_pin = p_child->ddc_pin;
 			p_mapping->initialized = 1;
 		} else {
 			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 759c2ef..e16ac5a 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -136,11 +136,17 @@
 		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
 
 	if (intel_crtc->pipe == 0) {
-		adpa |= ADPA_PIPE_A_SELECT;
+		if (HAS_PCH_CPT(dev))
+			adpa |= PORT_TRANS_A_SEL_CPT;
+		else
+			adpa |= ADPA_PIPE_A_SELECT;
 		if (!HAS_PCH_SPLIT(dev))
 			I915_WRITE(BCLRPAT_A, 0);
 	} else {
-		adpa |= ADPA_PIPE_B_SELECT;
+		if (HAS_PCH_CPT(dev))
+			adpa |= PORT_TRANS_B_SEL_CPT;
+		else
+			adpa |= ADPA_PIPE_B_SELECT;
 		if (!HAS_PCH_SPLIT(dev))
 			I915_WRITE(BCLRPAT_B, 0);
 	}
@@ -152,15 +158,21 @@
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 adpa;
+	u32 adpa, temp;
 	bool ret;
 
-	adpa = I915_READ(PCH_ADPA);
+	temp = adpa = I915_READ(PCH_ADPA);
 
-	adpa &= ~ADPA_CRT_HOTPLUG_MASK;
-	/* disable HPD first */
-	I915_WRITE(PCH_ADPA, adpa);
-	(void)I915_READ(PCH_ADPA);
+	if (HAS_PCH_CPT(dev)) {
+		/* Disable DAC before force detect */
+		I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
+		(void)I915_READ(PCH_ADPA);
+	} else {
+		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+		/* disable HPD first */
+		I915_WRITE(PCH_ADPA, adpa);
+		(void)I915_READ(PCH_ADPA);
+	}
 
 	adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
 			ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -176,6 +188,11 @@
 	while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
 		;
 
+	if (HAS_PCH_CPT(dev)) {
+		I915_WRITE(PCH_ADPA, temp);
+		(void)I915_READ(PCH_ADPA);
+	}
+
 	/* Check the status to see if both blue and green are on now */
 	adpa = I915_READ(PCH_ADPA);
 	adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
@@ -245,9 +262,9 @@
 	return false;
 }
 
-static bool intel_crt_detect_ddc(struct drm_connector *connector)
+static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 
 	/* CRT should always be at 0, but check anyway */
 	if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
@@ -387,8 +404,8 @@
 static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct drm_encoder *encoder = &intel_encoder->enc;
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct drm_crtc *crtc;
 	int dpms_mode;
 	enum drm_connector_status status;
@@ -400,18 +417,19 @@
 			return connector_status_disconnected;
 	}
 
-	if (intel_crt_detect_ddc(connector))
+	if (intel_crt_detect_ddc(encoder))
 		return connector_status_connected;
 
 	/* for pre-945g platforms use load detect */
 	if (encoder->crtc && encoder->crtc->enabled) {
 		status = intel_crt_load_detect(encoder->crtc, intel_encoder);
 	} else {
-		crtc = intel_get_load_detect_pipe(intel_encoder,
+		crtc = intel_get_load_detect_pipe(intel_encoder, connector,
 						  NULL, &dpms_mode);
 		if (crtc) {
 			status = intel_crt_load_detect(crtc, intel_encoder);
-			intel_release_load_detect_pipe(intel_encoder, dpms_mode);
+			intel_release_load_detect_pipe(intel_encoder,
+						       connector, dpms_mode);
 		} else
 			status = connector_status_unknown;
 	}
@@ -421,9 +439,6 @@
 
 static void intel_crt_destroy(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
-	intel_i2c_destroy(intel_encoder->ddc_bus);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
@@ -432,29 +447,27 @@
 static int intel_crt_get_modes(struct drm_connector *connector)
 {
 	int ret;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct i2c_adapter *ddcbus;
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct i2c_adapter *ddc_bus;
 	struct drm_device *dev = connector->dev;
 
 
-	ret = intel_ddc_get_modes(intel_encoder);
+	ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
 	if (ret || !IS_G4X(dev))
 		goto end;
 
-	ddcbus = intel_encoder->ddc_bus;
 	/* Try to probe digital port for output in DVI-I -> VGA mode. */
-	intel_encoder->ddc_bus =
-		intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
+	ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
 
-	if (!intel_encoder->ddc_bus) {
-		intel_encoder->ddc_bus = ddcbus;
+	if (!ddc_bus) {
 		dev_printk(KERN_ERR, &connector->dev->pdev->dev,
 			   "DDC bus registration failed for CRTDDC_D.\n");
 		goto end;
 	}
 	/* Try to get modes by GPIOD port */
-	ret = intel_ddc_get_modes(intel_encoder);
-	intel_i2c_destroy(ddcbus);
+	ret = intel_ddc_get_modes(connector, ddc_bus);
+	intel_i2c_destroy(ddc_bus);
 
 end:
 	return ret;
@@ -491,12 +504,16 @@
 static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
 	.mode_valid = intel_crt_mode_valid,
 	.get_modes = intel_crt_get_modes,
-	.best_encoder = intel_best_encoder,
+	.best_encoder = intel_attached_encoder,
 };
 
 static void intel_crt_enc_destroy(struct drm_encoder *encoder)
 {
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+	intel_i2c_destroy(intel_encoder->ddc_bus);
 	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -507,6 +524,7 @@
 {
 	struct drm_connector *connector;
 	struct intel_encoder *intel_encoder;
+	struct intel_connector *intel_connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 i2c_reg;
 
@@ -514,14 +532,20 @@
 	if (!intel_encoder)
 		return;
 
-	connector = &intel_encoder->base;
-	drm_connector_init(dev, &intel_encoder->base,
+	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_encoder);
+		return;
+	}
+
+	connector = &intel_connector->base;
+	drm_connector_init(dev, &intel_connector->base,
 			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
 	drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
 			 DRM_MODE_ENCODER_DAC);
 
-	drm_mode_connector_attach_encoder(&intel_encoder->base,
+	drm_mode_connector_attach_encoder(&intel_connector->base,
 					  &intel_encoder->enc);
 
 	/* Set up the DDC bus. */
@@ -553,5 +577,10 @@
 
 	drm_sysfs_connector_add(connector);
 
+	if (I915_HAS_HOTPLUG(dev))
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+	else
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
 	dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f27e370..f469a84 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -742,12 +742,11 @@
 {
     struct drm_device *dev = crtc->dev;
     struct drm_mode_config *mode_config = &dev->mode_config;
-    struct drm_connector *l_entry;
+    struct drm_encoder *l_entry;
 
-    list_for_each_entry(l_entry, &mode_config->connector_list, head) {
-	    if (l_entry->encoder &&
-	        l_entry->encoder->crtc == crtc) {
-		    struct intel_encoder *intel_encoder = to_intel_encoder(l_entry);
+    list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
+	    if (l_entry && l_entry->crtc == crtc) {
+		    struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
 		    if (intel_encoder->type == type)
 			    return true;
 	    }
@@ -755,23 +754,6 @@
     return false;
 }
 
-static struct drm_connector *
-intel_pipe_get_connector (struct drm_crtc *crtc)
-{
-    struct drm_device *dev = crtc->dev;
-    struct drm_mode_config *mode_config = &dev->mode_config;
-    struct drm_connector *l_entry, *ret = NULL;
-
-    list_for_each_entry(l_entry, &mode_config->connector_list, head) {
-	    if (l_entry->encoder &&
-	        l_entry->encoder->crtc == crtc) {
-		    ret = l_entry;
-		    break;
-	    }
-    }
-    return ret;
-}
-
 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
 /**
  * Returns whether the given set of divisors are valid for a given refclk with
@@ -1066,9 +1048,8 @@
 	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
+static bool i8xx_fbc_enabled(struct drm_device *dev)
 {
-	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
@@ -1125,14 +1106,43 @@
 	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static bool g4x_fbc_enabled(struct drm_crtc *crtc)
+static bool g4x_fbc_enabled(struct drm_device *dev)
 {
-	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->display.fbc_enabled)
+		return false;
+
+	return dev_priv->display.fbc_enabled(dev);
+}
+
+void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+
+	if (!dev_priv->display.enable_fbc)
+		return;
+
+	dev_priv->display.enable_fbc(crtc, interval);
+}
+
+void intel_disable_fbc(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->display.disable_fbc)
+		return;
+
+	dev_priv->display.disable_fbc(dev);
+}
+
 /**
  * intel_update_fbc - enable/disable FBC as needed
  * @crtc: CRTC to point the compressor at
@@ -1167,9 +1177,7 @@
 	if (!i915_powersave)
 		return;
 
-	if (!dev_priv->display.fbc_enabled ||
-	    !dev_priv->display.enable_fbc ||
-	    !dev_priv->display.disable_fbc)
+	if (!I915_HAS_FBC(dev))
 		return;
 
 	if (!crtc->fb)
@@ -1216,28 +1224,25 @@
 		goto out_disable;
 	}
 
-	if (dev_priv->display.fbc_enabled(crtc)) {
+	if (intel_fbc_enabled(dev)) {
 		/* We can re-enable it in this case, but need to update pitch */
-		if (fb->pitch > dev_priv->cfb_pitch)
-			dev_priv->display.disable_fbc(dev);
-		if (obj_priv->fence_reg != dev_priv->cfb_fence)
-			dev_priv->display.disable_fbc(dev);
-		if (plane != dev_priv->cfb_plane)
-			dev_priv->display.disable_fbc(dev);
+		if ((fb->pitch > dev_priv->cfb_pitch) ||
+		    (obj_priv->fence_reg != dev_priv->cfb_fence) ||
+		    (plane != dev_priv->cfb_plane))
+			intel_disable_fbc(dev);
 	}
 
-	if (!dev_priv->display.fbc_enabled(crtc)) {
-		/* Now try to turn it back on if possible */
-		dev_priv->display.enable_fbc(crtc, 500);
-	}
+	/* Now try to turn it back on if possible */
+	if (!intel_fbc_enabled(dev))
+		intel_enable_fbc(crtc, 500);
 
 	return;
 
 out_disable:
 	DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
 	/* Multiple disables should be harmless */
-	if (dev_priv->display.fbc_enabled(crtc))
-		dev_priv->display.disable_fbc(dev);
+	if (intel_fbc_enabled(dev))
+		intel_disable_fbc(dev);
 }
 
 static int
@@ -1510,6 +1515,219 @@
 	udelay(500);
 }
 
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+	int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+	int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+	u32 temp, tries = 0;
+
+	/* enable CPU FDI TX and PCH FDI RX */
+	temp = I915_READ(fdi_tx_reg);
+	temp |= FDI_TX_ENABLE;
+	temp &= ~(7 << 19);
+	temp |= (intel_crtc->fdi_lanes - 1) << 19;
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	I915_WRITE(fdi_tx_reg, temp);
+	I915_READ(fdi_tx_reg);
+
+	temp = I915_READ(fdi_rx_reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+	I915_READ(fdi_rx_reg);
+	udelay(150);
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
+	   for train result */
+	temp = I915_READ(fdi_rx_imr_reg);
+	temp &= ~FDI_RX_SYMBOL_LOCK;
+	temp &= ~FDI_RX_BIT_LOCK;
+	I915_WRITE(fdi_rx_imr_reg, temp);
+	I915_READ(fdi_rx_imr_reg);
+	udelay(150);
+
+	for (;;) {
+		temp = I915_READ(fdi_rx_iir_reg);
+		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+		if ((temp & FDI_RX_BIT_LOCK)) {
+			DRM_DEBUG_KMS("FDI train 1 done.\n");
+			I915_WRITE(fdi_rx_iir_reg,
+				   temp | FDI_RX_BIT_LOCK);
+			break;
+		}
+
+		tries++;
+
+		if (tries > 5) {
+			DRM_DEBUG_KMS("FDI train 1 fail!\n");
+			break;
+		}
+	}
+
+	/* Train 2 */
+	temp = I915_READ(fdi_tx_reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_2;
+	I915_WRITE(fdi_tx_reg, temp);
+
+	temp = I915_READ(fdi_rx_reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_2;
+	I915_WRITE(fdi_rx_reg, temp);
+	udelay(150);
+
+	tries = 0;
+
+	for (;;) {
+		temp = I915_READ(fdi_rx_iir_reg);
+		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+		if (temp & FDI_RX_SYMBOL_LOCK) {
+			I915_WRITE(fdi_rx_iir_reg,
+				   temp | FDI_RX_SYMBOL_LOCK);
+			DRM_DEBUG_KMS("FDI train 2 done.\n");
+			break;
+		}
+
+		tries++;
+
+		if (tries > 5) {
+			DRM_DEBUG_KMS("FDI train 2 fail!\n");
+			break;
+		}
+	}
+
+	DRM_DEBUG_KMS("FDI train done\n");
+}
+
+static int snb_b_fdi_train_param[] = {
+	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+	int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+	int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+	u32 temp, i;
+
+	/* enable CPU FDI TX and PCH FDI RX */
+	temp = I915_READ(fdi_tx_reg);
+	temp |= FDI_TX_ENABLE;
+	temp &= ~(7 << 19);
+	temp |= (intel_crtc->fdi_lanes - 1) << 19;
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+	/* SNB-B */
+	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+	I915_WRITE(fdi_tx_reg, temp);
+	I915_READ(fdi_tx_reg);
+
+	temp = I915_READ(fdi_rx_reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_1;
+	}
+	I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+	I915_READ(fdi_rx_reg);
+	udelay(150);
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
+	   for train result */
+	temp = I915_READ(fdi_rx_imr_reg);
+	temp &= ~FDI_RX_SYMBOL_LOCK;
+	temp &= ~FDI_RX_BIT_LOCK;
+	I915_WRITE(fdi_rx_imr_reg, temp);
+	I915_READ(fdi_rx_imr_reg);
+	udelay(150);
+
+	for (i = 0; i < 4; i++) {
+		temp = I915_READ(fdi_tx_reg);
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		temp |= snb_b_fdi_train_param[i];
+		I915_WRITE(fdi_tx_reg, temp);
+		udelay(500);
+
+		temp = I915_READ(fdi_rx_iir_reg);
+		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+		if (temp & FDI_RX_BIT_LOCK) {
+			I915_WRITE(fdi_rx_iir_reg,
+				   temp | FDI_RX_BIT_LOCK);
+			DRM_DEBUG_KMS("FDI train 1 done.\n");
+			break;
+		}
+	}
+	if (i == 4)
+		DRM_DEBUG_KMS("FDI train 1 fail!\n");
+
+	/* Train 2 */
+	temp = I915_READ(fdi_tx_reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_2;
+	if (IS_GEN6(dev)) {
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		/* SNB-B */
+		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+	}
+	I915_WRITE(fdi_tx_reg, temp);
+
+	temp = I915_READ(fdi_rx_reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_2;
+	}
+	I915_WRITE(fdi_rx_reg, temp);
+	udelay(150);
+
+	for (i = 0; i < 4; i++) {
+		temp = I915_READ(fdi_tx_reg);
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		temp |= snb_b_fdi_train_param[i];
+		I915_WRITE(fdi_tx_reg, temp);
+		udelay(500);
+
+		temp = I915_READ(fdi_rx_iir_reg);
+		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+		if (temp & FDI_RX_SYMBOL_LOCK) {
+			I915_WRITE(fdi_rx_iir_reg,
+				   temp | FDI_RX_SYMBOL_LOCK);
+			DRM_DEBUG_KMS("FDI train 2 done.\n");
+			break;
+		}
+	}
+	if (i == 4)
+		DRM_DEBUG_KMS("FDI train 2 fail!\n");
+
+	DRM_DEBUG_KMS("FDI train done.\n");
+}
+
 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
@@ -1523,8 +1741,6 @@
 	int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
 	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
 	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
-	int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
-	int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
 	int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
 	int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
 	int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
@@ -1541,8 +1757,9 @@
 	int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
 	int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
 	int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
+	int trans_dpll_sel = (pipe == 0) ? 0 : 1;
 	u32 temp;
-	int tries = 5, j, n;
+	int n;
 	u32 pipe_bpc;
 
 	temp = I915_READ(pipeconf_reg);
@@ -1569,12 +1786,6 @@
 			/* enable eDP PLL */
 			ironlake_enable_pll_edp(crtc);
 		} else {
-			/* enable PCH DPLL */
-			temp = I915_READ(pch_dpll_reg);
-			if ((temp & DPLL_VCO_ENABLE) == 0) {
-				I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
-				I915_READ(pch_dpll_reg);
-			}
 
 			/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
 			temp = I915_READ(fdi_rx_reg);
@@ -1584,9 +1795,15 @@
 			 */
 			temp &= ~(0x7 << 16);
 			temp |= (pipe_bpc << 11);
-			I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
-					FDI_SEL_PCDCLK |
-					FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
+			temp &= ~(7 << 19);
+			temp |= (intel_crtc->fdi_lanes - 1) << 19;
+			I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+			I915_READ(fdi_rx_reg);
+			udelay(200);
+
+			/* Switch from Rawclk to PCDclk */
+			temp = I915_READ(fdi_rx_reg);
+			I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
 			I915_READ(fdi_rx_reg);
 			udelay(200);
 
@@ -1629,91 +1846,32 @@
 		}
 
 		if (!HAS_eDP) {
-			/* enable CPU FDI TX and PCH FDI RX */
-			temp = I915_READ(fdi_tx_reg);
-			temp |= FDI_TX_ENABLE;
-			temp |= FDI_DP_PORT_WIDTH_X4; /* default */
-			temp &= ~FDI_LINK_TRAIN_NONE;
-			temp |= FDI_LINK_TRAIN_PATTERN_1;
-			I915_WRITE(fdi_tx_reg, temp);
-			I915_READ(fdi_tx_reg);
+			/* For PCH output, train the FDI link */
+			if (IS_GEN6(dev))
+				gen6_fdi_link_train(crtc);
+			else
+				ironlake_fdi_link_train(crtc);
 
-			temp = I915_READ(fdi_rx_reg);
-			temp &= ~FDI_LINK_TRAIN_NONE;
-			temp |= FDI_LINK_TRAIN_PATTERN_1;
-			I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
-			I915_READ(fdi_rx_reg);
-
-			udelay(150);
-
-			/* Train FDI. */
-			/* umask FDI RX Interrupt symbol_lock and bit_lock bit
-			   for train result */
-			temp = I915_READ(fdi_rx_imr_reg);
-			temp &= ~FDI_RX_SYMBOL_LOCK;
-			temp &= ~FDI_RX_BIT_LOCK;
-			I915_WRITE(fdi_rx_imr_reg, temp);
-			I915_READ(fdi_rx_imr_reg);
-			udelay(150);
-
-			temp = I915_READ(fdi_rx_iir_reg);
-			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
-			if ((temp & FDI_RX_BIT_LOCK) == 0) {
-				for (j = 0; j < tries; j++) {
-					temp = I915_READ(fdi_rx_iir_reg);
-					DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
-								temp);
-					if (temp & FDI_RX_BIT_LOCK)
-						break;
-					udelay(200);
-				}
-				if (j != tries)
-					I915_WRITE(fdi_rx_iir_reg,
-							temp | FDI_RX_BIT_LOCK);
-				else
-					DRM_DEBUG_KMS("train 1 fail\n");
-			} else {
-				I915_WRITE(fdi_rx_iir_reg,
-						temp | FDI_RX_BIT_LOCK);
-				DRM_DEBUG_KMS("train 1 ok 2!\n");
+			/* enable PCH DPLL */
+			temp = I915_READ(pch_dpll_reg);
+			if ((temp & DPLL_VCO_ENABLE) == 0) {
+				I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
+				I915_READ(pch_dpll_reg);
 			}
-			temp = I915_READ(fdi_tx_reg);
-			temp &= ~FDI_LINK_TRAIN_NONE;
-			temp |= FDI_LINK_TRAIN_PATTERN_2;
-			I915_WRITE(fdi_tx_reg, temp);
+			udelay(200);
 
-			temp = I915_READ(fdi_rx_reg);
-			temp &= ~FDI_LINK_TRAIN_NONE;
-			temp |= FDI_LINK_TRAIN_PATTERN_2;
-			I915_WRITE(fdi_rx_reg, temp);
-
-			udelay(150);
-
-			temp = I915_READ(fdi_rx_iir_reg);
-			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
-			if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
-				for (j = 0; j < tries; j++) {
-					temp = I915_READ(fdi_rx_iir_reg);
-					DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
-								temp);
-					if (temp & FDI_RX_SYMBOL_LOCK)
-						break;
-					udelay(200);
-				}
-				if (j != tries) {
-					I915_WRITE(fdi_rx_iir_reg,
-							temp | FDI_RX_SYMBOL_LOCK);
-					DRM_DEBUG_KMS("train 2 ok 1!\n");
-				} else
-					DRM_DEBUG_KMS("train 2 fail\n");
-			} else {
-				I915_WRITE(fdi_rx_iir_reg,
-						temp | FDI_RX_SYMBOL_LOCK);
-				DRM_DEBUG_KMS("train 2 ok 2!\n");
+			if (HAS_PCH_CPT(dev)) {
+				/* Be sure PCH DPLL SEL is set */
+				temp = I915_READ(PCH_DPLL_SEL);
+				if (trans_dpll_sel == 0 &&
+						(temp & TRANSA_DPLL_ENABLE) == 0)
+					temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+				else if (trans_dpll_sel == 1 &&
+						(temp & TRANSB_DPLL_ENABLE) == 0)
+					temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+				I915_WRITE(PCH_DPLL_SEL, temp);
+				I915_READ(PCH_DPLL_SEL);
 			}
-			DRM_DEBUG_KMS("train done\n");
 
 			/* set transcoder timing */
 			I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
@@ -1724,6 +1882,60 @@
 			I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
 			I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
 
+			/* enable normal train */
+			temp = I915_READ(fdi_tx_reg);
+			temp &= ~FDI_LINK_TRAIN_NONE;
+			I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
+					FDI_TX_ENHANCE_FRAME_ENABLE);
+			I915_READ(fdi_tx_reg);
+
+			temp = I915_READ(fdi_rx_reg);
+			if (HAS_PCH_CPT(dev)) {
+				temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+				temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+			} else {
+				temp &= ~FDI_LINK_TRAIN_NONE;
+				temp |= FDI_LINK_TRAIN_NONE;
+			}
+			I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+			I915_READ(fdi_rx_reg);
+
+			/* wait one idle pattern time */
+			udelay(100);
+
+			/* For PCH DP, enable TRANS_DP_CTL */
+			if (HAS_PCH_CPT(dev) &&
+			    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+				int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+				int reg;
+
+				reg = I915_READ(trans_dp_ctl);
+				reg &= ~TRANS_DP_PORT_SEL_MASK;
+				reg = TRANS_DP_OUTPUT_ENABLE |
+				      TRANS_DP_ENH_FRAMING |
+				      TRANS_DP_VSYNC_ACTIVE_HIGH |
+				      TRANS_DP_HSYNC_ACTIVE_HIGH;
+
+				switch (intel_trans_dp_port_sel(crtc)) {
+				case PCH_DP_B:
+					reg |= TRANS_DP_PORT_SEL_B;
+					break;
+				case PCH_DP_C:
+					reg |= TRANS_DP_PORT_SEL_C;
+					break;
+				case PCH_DP_D:
+					reg |= TRANS_DP_PORT_SEL_D;
+					break;
+				default:
+					DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+					reg |= TRANS_DP_PORT_SEL_B;
+					break;
+				}
+
+				I915_WRITE(trans_dp_ctl, reg);
+				POSTING_READ(trans_dp_ctl);
+			}
+
 			/* enable PCH transcoder */
 			temp = I915_READ(transconf_reg);
 			/*
@@ -1738,23 +1950,6 @@
 			while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
 				;
 
-			/* enable normal */
-
-			temp = I915_READ(fdi_tx_reg);
-			temp &= ~FDI_LINK_TRAIN_NONE;
-			I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
-					FDI_TX_ENHANCE_FRAME_ENABLE);
-			I915_READ(fdi_tx_reg);
-
-			temp = I915_READ(fdi_rx_reg);
-			temp &= ~FDI_LINK_TRAIN_NONE;
-			I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
-					FDI_RX_ENHANCE_FRAME_ENABLE);
-			I915_READ(fdi_rx_reg);
-
-			/* wait one idle pattern time */
-			udelay(100);
-
 		}
 
 		intel_crtc_load_lut(crtc);
@@ -1805,6 +2000,8 @@
 			I915_READ(pf_ctl_reg);
 		}
 		I915_WRITE(pf_win_size, 0);
+		POSTING_READ(pf_win_size);
+
 
 		/* disable CPU FDI tx and PCH FDI rx */
 		temp = I915_READ(fdi_tx_reg);
@@ -1825,11 +2022,18 @@
 		temp &= ~FDI_LINK_TRAIN_NONE;
 		temp |= FDI_LINK_TRAIN_PATTERN_1;
 		I915_WRITE(fdi_tx_reg, temp);
+		POSTING_READ(fdi_tx_reg);
 
 		temp = I915_READ(fdi_rx_reg);
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_PATTERN_1;
+		if (HAS_PCH_CPT(dev)) {
+			temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+			temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+		} else {
+			temp &= ~FDI_LINK_TRAIN_NONE;
+			temp |= FDI_LINK_TRAIN_PATTERN_1;
+		}
 		I915_WRITE(fdi_rx_reg, temp);
+		POSTING_READ(fdi_rx_reg);
 
 		udelay(100);
 
@@ -1859,6 +2063,7 @@
 				}
 			}
 		}
+
 		temp = I915_READ(transconf_reg);
 		/* BPC in transcoder is consistent with that in pipeconf */
 		temp &= ~PIPE_BPC_MASK;
@@ -1867,35 +2072,53 @@
 		I915_READ(transconf_reg);
 		udelay(100);
 
+		if (HAS_PCH_CPT(dev)) {
+			/* disable TRANS_DP_CTL */
+			int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+			int reg;
+
+			reg = I915_READ(trans_dp_ctl);
+			reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+			I915_WRITE(trans_dp_ctl, reg);
+			POSTING_READ(trans_dp_ctl);
+
+			/* disable DPLL_SEL */
+			temp = I915_READ(PCH_DPLL_SEL);
+			if (trans_dpll_sel == 0)
+				temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+			else
+				temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+			I915_WRITE(PCH_DPLL_SEL, temp);
+			I915_READ(PCH_DPLL_SEL);
+
+		}
+
 		/* disable PCH DPLL */
 		temp = I915_READ(pch_dpll_reg);
-		if ((temp & DPLL_VCO_ENABLE) != 0) {
-			I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
-			I915_READ(pch_dpll_reg);
-		}
+		I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
+		I915_READ(pch_dpll_reg);
 
 		if (HAS_eDP) {
 			ironlake_disable_pll_edp(crtc);
 		}
 
+		/* Switch from PCDclk to Rawclk */
 		temp = I915_READ(fdi_rx_reg);
 		temp &= ~FDI_SEL_PCDCLK;
 		I915_WRITE(fdi_rx_reg, temp);
 		I915_READ(fdi_rx_reg);
 
+		/* Disable CPU FDI TX PLL */
+		temp = I915_READ(fdi_tx_reg);
+		I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
+		I915_READ(fdi_tx_reg);
+		udelay(100);
+
 		temp = I915_READ(fdi_rx_reg);
 		temp &= ~FDI_RX_PLL_ENABLE;
 		I915_WRITE(fdi_rx_reg, temp);
 		I915_READ(fdi_rx_reg);
 
-		/* Disable CPU FDI TX PLL */
-		temp = I915_READ(fdi_tx_reg);
-		if ((temp & FDI_TX_PLL_ENABLE) != 0) {
-			I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
-			I915_READ(fdi_tx_reg);
-			udelay(100);
-		}
-
 		/* Wait for the clocks to turn off. */
 		udelay(100);
 		break;
@@ -2331,6 +2554,30 @@
 	I830_FIFO_LINE_SIZE
 };
 
+static struct intel_watermark_params ironlake_display_wm_info = {
+	ILK_DISPLAY_FIFO,
+	ILK_DISPLAY_MAXWM,
+	ILK_DISPLAY_DFTWM,
+	2,
+	ILK_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params ironlake_display_srwm_info = {
+	ILK_DISPLAY_SR_FIFO,
+	ILK_DISPLAY_MAX_SRWM,
+	ILK_DISPLAY_DFT_SRWM,
+	2,
+	ILK_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params ironlake_cursor_srwm_info = {
+	ILK_CURSOR_SR_FIFO,
+	ILK_CURSOR_MAX_SRWM,
+	ILK_CURSOR_DFT_SRWM,
+	2,
+	ILK_FIFO_LINE_SIZE
+};
+
 /**
  * intel_calculate_wm - calculate watermark level
  * @clock_in_khz: pixel clock
@@ -2449,66 +2696,6 @@
 	DRM_INFO("Big FIFO is disabled\n");
 }
 
-static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
-				 int pixel_size)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 reg;
-	unsigned long wm;
-	struct cxsr_latency *latency;
-
-	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
-		dev_priv->mem_freq);
-	if (!latency) {
-		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-		pineview_disable_cxsr(dev);
-		return;
-	}
-
-	/* Display SR */
-	wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
-				latency->display_sr);
-	reg = I915_READ(DSPFW1);
-	reg &= 0x7fffff;
-	reg |= wm << 23;
-	I915_WRITE(DSPFW1, reg);
-	DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
-
-	/* cursor SR */
-	wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
-				latency->cursor_sr);
-	reg = I915_READ(DSPFW3);
-	reg &= ~(0x3f << 24);
-	reg |= (wm & 0x3f) << 24;
-	I915_WRITE(DSPFW3, reg);
-
-	/* Display HPLL off SR */
-	wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
-		latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
-	reg = I915_READ(DSPFW3);
-	reg &= 0xfffffe00;
-	reg |= wm & 0x1ff;
-	I915_WRITE(DSPFW3, reg);
-
-	/* cursor HPLL off SR */
-	wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
-				latency->cursor_hpll_disable);
-	reg = I915_READ(DSPFW3);
-	reg &= ~(0x3f << 16);
-	reg |= (wm & 0x3f) << 16;
-	I915_WRITE(DSPFW3, reg);
-	DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
-
-	/* activate cxsr */
-	reg = I915_READ(DSPFW3);
-	reg |= PINEVIEW_SELF_REFRESH_EN;
-	I915_WRITE(DSPFW3, reg);
-
-	DRM_INFO("Big FIFO is enabled\n");
-
-	return;
-}
-
 /*
  * Latency for FIFO fetches is dependent on several factors:
  *   - memory configuration (speed, channels)
@@ -2593,6 +2780,71 @@
 	return size;
 }
 
+static void pineview_update_wm(struct drm_device *dev,  int planea_clock,
+			  int planeb_clock, int sr_hdisplay, int pixel_size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg;
+	unsigned long wm;
+	struct cxsr_latency *latency;
+	int sr_clock;
+
+	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
+					 dev_priv->mem_freq);
+	if (!latency) {
+		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+		pineview_disable_cxsr(dev);
+		return;
+	}
+
+	if (!planea_clock || !planeb_clock) {
+		sr_clock = planea_clock ? planea_clock : planeb_clock;
+
+		/* Display SR */
+		wm = intel_calculate_wm(sr_clock, &pineview_display_wm,
+					pixel_size, latency->display_sr);
+		reg = I915_READ(DSPFW1);
+		reg &= ~DSPFW_SR_MASK;
+		reg |= wm << DSPFW_SR_SHIFT;
+		I915_WRITE(DSPFW1, reg);
+		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+		/* cursor SR */
+		wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm,
+					pixel_size, latency->cursor_sr);
+		reg = I915_READ(DSPFW3);
+		reg &= ~DSPFW_CURSOR_SR_MASK;
+		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+		I915_WRITE(DSPFW3, reg);
+
+		/* Display HPLL off SR */
+		wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm,
+					pixel_size, latency->display_hpll_disable);
+		reg = I915_READ(DSPFW3);
+		reg &= ~DSPFW_HPLL_SR_MASK;
+		reg |= wm & DSPFW_HPLL_SR_MASK;
+		I915_WRITE(DSPFW3, reg);
+
+		/* cursor HPLL off SR */
+		wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm,
+					pixel_size, latency->cursor_hpll_disable);
+		reg = I915_READ(DSPFW3);
+		reg &= ~DSPFW_HPLL_CURSOR_MASK;
+		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+		I915_WRITE(DSPFW3, reg);
+		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+		/* activate cxsr */
+		reg = I915_READ(DSPFW3);
+		reg |= PINEVIEW_SELF_REFRESH_EN;
+		I915_WRITE(DSPFW3, reg);
+		DRM_DEBUG_KMS("Self-refresh is enabled\n");
+	} else {
+		pineview_disable_cxsr(dev);
+		DRM_DEBUG_KMS("Self-refresh is disabled\n");
+	}
+}
+
 static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
 			  int planeb_clock, int sr_hdisplay, int pixel_size)
 {
@@ -2813,6 +3065,108 @@
 	I915_WRITE(FW_BLC, fwater_lo);
 }
 
+#define ILK_LP0_PLANE_LATENCY		700
+
+static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
+		       int planeb_clock, int sr_hdisplay, int pixel_size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+	int sr_wm, cursor_wm;
+	unsigned long line_time_us;
+	int sr_clock, entries_required;
+	u32 reg_value;
+
+	/* Calculate and update the watermark for plane A */
+	if (planea_clock) {
+		entries_required = ((planea_clock / 1000) * pixel_size *
+				     ILK_LP0_PLANE_LATENCY) / 1000;
+		entries_required = DIV_ROUND_UP(entries_required,
+				   ironlake_display_wm_info.cacheline_size);
+		planea_wm = entries_required +
+			    ironlake_display_wm_info.guard_size;
+
+		if (planea_wm > (int)ironlake_display_wm_info.max_wm)
+			planea_wm = ironlake_display_wm_info.max_wm;
+
+		cursora_wm = 16;
+		reg_value = I915_READ(WM0_PIPEA_ILK);
+		reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+		reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
+			     (cursora_wm & WM0_PIPE_CURSOR_MASK);
+		I915_WRITE(WM0_PIPEA_ILK, reg_value);
+		DRM_DEBUG_KMS("FIFO watermarks for pipe A - plane %d, "
+				"cursor: %d\n", planea_wm, cursora_wm);
+	}
+	/* Calculate and update the watermark for plane B */
+	if (planeb_clock) {
+		entries_required = ((planeb_clock / 1000) * pixel_size *
+				     ILK_LP0_PLANE_LATENCY) / 1000;
+		entries_required = DIV_ROUND_UP(entries_required,
+				   ironlake_display_wm_info.cacheline_size);
+		planeb_wm = entries_required +
+			    ironlake_display_wm_info.guard_size;
+
+		if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
+			planeb_wm = ironlake_display_wm_info.max_wm;
+
+		cursorb_wm = 16;
+		reg_value = I915_READ(WM0_PIPEB_ILK);
+		reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+		reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
+			     (cursorb_wm & WM0_PIPE_CURSOR_MASK);
+		I915_WRITE(WM0_PIPEB_ILK, reg_value);
+		DRM_DEBUG_KMS("FIFO watermarks for pipe B - plane %d, "
+				"cursor: %d\n", planeb_wm, cursorb_wm);
+	}
+
+	/*
+	 * Calculate and update the self-refresh watermark only when one
+	 * display plane is used.
+	 */
+	if (!planea_clock || !planeb_clock) {
+		int line_count;
+		/* Read the self-refresh latency. The unit is 0.5us */
+		int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
+
+		sr_clock = planea_clock ? planea_clock : planeb_clock;
+		line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+
+		/* Use ns/us then divide to preserve precision */
+		line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
+			       / 1000;
+
+		/* calculate the self-refresh watermark for display plane */
+		entries_required = line_count * sr_hdisplay * pixel_size;
+		entries_required = DIV_ROUND_UP(entries_required,
+				   ironlake_display_srwm_info.cacheline_size);
+		sr_wm = entries_required +
+			ironlake_display_srwm_info.guard_size;
+
+		/* calculate the self-refresh watermark for display cursor */
+		entries_required = line_count * pixel_size * 64;
+		entries_required = DIV_ROUND_UP(entries_required,
+				   ironlake_cursor_srwm_info.cacheline_size);
+		cursor_wm = entries_required +
+			    ironlake_cursor_srwm_info.guard_size;
+
+		/* configure watermark and enable self-refresh */
+		reg_value = I915_READ(WM1_LP_ILK);
+		reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
+			       WM1_LP_CURSOR_MASK);
+		reg_value |= WM1_LP_SR_EN |
+			     (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+			     (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
+
+		I915_WRITE(WM1_LP_ILK, reg_value);
+		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+				"cursor %d\n", sr_wm, cursor_wm);
+
+	} else {
+		/* Turn off self refresh if both pipes are enabled */
+		I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+	}
+}
 /**
  * intel_update_watermarks - update FIFO watermark values based on current modes
  *
@@ -2882,12 +3236,6 @@
 	if (enabled <= 0)
 		return;
 
-	/* Single plane configs can enable self refresh */
-	if (enabled == 1 && IS_PINEVIEW(dev))
-		pineview_enable_cxsr(dev, sr_clock, pixel_size);
-	else if (IS_PINEVIEW(dev))
-		pineview_disable_cxsr(dev);
-
 	dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
 				    sr_hdisplay, pixel_size);
 }
@@ -2924,7 +3272,8 @@
 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
 	bool is_edp = false;
 	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct intel_encoder *intel_encoder = NULL;
 	const intel_limit_t *limit;
 	int ret;
 	struct fdi_m_n m_n = {0};
@@ -2935,6 +3284,8 @@
 	int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
 	int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
 	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+	int trans_dpll_sel = (pipe == 0) ? 0 : 1;
 	int lvds_reg = LVDS;
 	u32 temp;
 	int sdvo_pixel_multiply;
@@ -2942,12 +3293,13 @@
 
 	drm_vblank_pre_modeset(dev, pipe);
 
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
 
-		if (!connector->encoder || connector->encoder->crtc != crtc)
+		if (!encoder || encoder->crtc != crtc)
 			continue;
 
+		intel_encoder = enc_to_intel_encoder(encoder);
+
 		switch (intel_encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
@@ -3043,14 +3395,12 @@
 
 	/* FDI link */
 	if (HAS_PCH_SPLIT(dev)) {
-		int lane, link_bw, bpp;
+		int lane = 0, link_bw, bpp;
 		/* eDP doesn't require FDI link, so just set DP M/N
 		   according to current link config */
 		if (is_edp) {
-			struct drm_connector *edp;
 			target_clock = mode->clock;
-			edp = intel_pipe_get_connector(crtc);
-			intel_edp_link_config(to_intel_encoder(edp),
+			intel_edp_link_config(intel_encoder,
 					&lane, &link_bw);
 		} else {
 			/* DP over FDI requires target mode clock
@@ -3059,7 +3409,6 @@
 				target_clock = mode->clock;
 			else
 				target_clock = adjusted_mode->clock;
-			lane = 4;
 			link_bw = 270000;
 		}
 
@@ -3111,6 +3460,18 @@
 			bpp = 24;
 		}
 
+		if (!lane) {
+			/*
+			 * Account for spread spectrum to avoid
+			 * oversubscribing the link. Max center spread
+			 * is 2.5%; use 5% for safety's sake.
+			 */
+			u32 bps = target_clock * bpp * 21 / 20;
+			lane = bps / (link_bw * 8) + 1;
+		}
+
+		intel_crtc->fdi_lanes = lane;
+
 		ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
 	}
 
@@ -3265,11 +3626,6 @@
 			pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
 	}
 
-	dspcntr |= DISPLAY_PLANE_ENABLE;
-	pipeconf |= PIPEACONF_ENABLE;
-	dpll |= DPLL_VCO_ENABLE;
-
-
 	/* Disable the panel fitter if it was on our pipe */
 	if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
 		I915_WRITE(PFIT_CONTROL, 0);
@@ -3292,6 +3648,18 @@
 		udelay(150);
 	}
 
+	/* enable transcoder DPLL */
+	if (HAS_PCH_CPT(dev)) {
+		temp = I915_READ(PCH_DPLL_SEL);
+		if (trans_dpll_sel == 0)
+			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+		else
+			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+		I915_WRITE(PCH_DPLL_SEL, temp);
+		I915_READ(PCH_DPLL_SEL);
+		udelay(150);
+	}
+
 	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
 	 * This is an exception to the general rule that mode_set doesn't turn
 	 * things on.
@@ -3303,7 +3671,18 @@
 			lvds_reg = PCH_LVDS;
 
 		lvds = I915_READ(lvds_reg);
-		lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
+		lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+		if (pipe == 1) {
+			if (HAS_PCH_CPT(dev))
+				lvds |= PORT_TRANS_B_SEL_CPT;
+			else
+				lvds |= LVDS_PIPEB_SELECT;
+		} else {
+			if (HAS_PCH_CPT(dev))
+				lvds &= ~PORT_TRANS_SEL_MASK;
+			else
+				lvds &= ~LVDS_PIPEB_SELECT;
+		}
 		/* set the corresponsding LVDS_BORDER bit */
 		lvds |= dev_priv->lvds_border_bits;
 		/* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -3321,14 +3700,16 @@
 		/* set the dithering flag */
 		if (IS_I965G(dev)) {
 			if (dev_priv->lvds_dither) {
-				if (HAS_PCH_SPLIT(dev))
+				if (HAS_PCH_SPLIT(dev)) {
 					pipeconf |= PIPE_ENABLE_DITHER;
-				else
+					pipeconf |= PIPE_DITHER_TYPE_ST01;
+				} else
 					lvds |= LVDS_ENABLE_DITHER;
 			} else {
-				if (HAS_PCH_SPLIT(dev))
+				if (HAS_PCH_SPLIT(dev)) {
 					pipeconf &= ~PIPE_ENABLE_DITHER;
-				else
+					pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+				} else
 					lvds &= ~LVDS_ENABLE_DITHER;
 			}
 		}
@@ -3337,6 +3718,20 @@
 	}
 	if (is_dp)
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
+	else if (HAS_PCH_SPLIT(dev)) {
+		/* For non-DP output, clear any trans DP clock recovery setting. */
+		if (pipe == 0) {
+			I915_WRITE(TRANSA_DATA_M1, 0);
+			I915_WRITE(TRANSA_DATA_N1, 0);
+			I915_WRITE(TRANSA_DP_LINK_M1, 0);
+			I915_WRITE(TRANSA_DP_LINK_N1, 0);
+		} else {
+			I915_WRITE(TRANSB_DATA_M1, 0);
+			I915_WRITE(TRANSB_DATA_N1, 0);
+			I915_WRITE(TRANSB_DP_LINK_M1, 0);
+			I915_WRITE(TRANSB_DP_LINK_N1, 0);
+		}
+	}
 
 	if (!is_edp) {
 		I915_WRITE(fp_reg, fp);
@@ -3411,6 +3806,18 @@
 			/* enable FDI RX PLL too */
 			temp = I915_READ(fdi_rx_reg);
 			I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+			I915_READ(fdi_rx_reg);
+			udelay(200);
+
+			/* enable FDI TX PLL too */
+			temp = I915_READ(fdi_tx_reg);
+			I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
+			I915_READ(fdi_tx_reg);
+
+			/* enable FDI RX PCDCLK */
+			temp = I915_READ(fdi_rx_reg);
+			I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
+			I915_READ(fdi_rx_reg);
 			udelay(200);
 		}
 	}
@@ -3671,6 +4078,7 @@
 };
 
 struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+					    struct drm_connector *connector,
 					    struct drm_display_mode *mode,
 					    int *dpms_mode)
 {
@@ -3729,7 +4137,7 @@
 	}
 
 	encoder->crtc = crtc;
-	intel_encoder->base.encoder = encoder;
+	connector->encoder = encoder;
 	intel_encoder->load_detect_temp = true;
 
 	intel_crtc = to_intel_crtc(crtc);
@@ -3755,7 +4163,8 @@
 	return crtc;
 }
 
-void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
+void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+				    struct drm_connector *connector, int dpms_mode)
 {
 	struct drm_encoder *encoder = &intel_encoder->enc;
 	struct drm_device *dev = encoder->dev;
@@ -3765,7 +4174,7 @@
 
 	if (intel_encoder->load_detect_temp) {
 		encoder->crtc = NULL;
-		intel_encoder->base.encoder = NULL;
+		connector->encoder = NULL;
 		intel_encoder->load_detect_temp = false;
 		crtc->enabled = drm_helper_crtc_in_use(crtc);
 		drm_helper_disable_unused_functions(dev);
@@ -4392,14 +4801,14 @@
 	return crtc;
 }
 
-static int intel_connector_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct drm_device *dev, int type_mask)
 {
 	int index_mask = 0;
-	struct drm_connector *connector;
+	struct drm_encoder *encoder;
 	int entry = 0;
 
-        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 		if (type_mask & intel_encoder->clone_mask)
 			index_mask |= (1 << entry);
 		entry++;
@@ -4411,7 +4820,7 @@
 static void intel_setup_outputs(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_connector *connector;
+	struct drm_encoder *encoder;
 
 	intel_crt_init(dev);
 
@@ -4426,9 +4835,8 @@
 			intel_dp_init(dev, DP_A);
 
 		if (I915_READ(HDMIB) & PORT_DETECTED) {
-			/* check SDVOB */
-			/* found = intel_sdvo_init(dev, HDMIB); */
-			found = 0;
+			/* PCH SDVOB is multiplexed with HDMIB */
+			found = intel_sdvo_init(dev, PCH_SDVOB);
 			if (!found)
 				intel_hdmi_init(dev, HDMIB);
 			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -4494,12 +4902,11 @@
 	if (SUPPORTS_TV(dev))
 		intel_tv_init(dev);
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-		struct drm_encoder *encoder = &intel_encoder->enc;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 
 		encoder->possible_crtcs = intel_encoder->crtc_mask;
-		encoder->possible_clones = intel_connector_clones(dev,
+		encoder->possible_clones = intel_encoder_clones(dev,
 						intel_encoder->clone_mask);
 	}
 }
@@ -4507,10 +4914,6 @@
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_device *dev = fb->dev;
-
-	if (fb->fbdev)
-		intelfb_remove(dev, fb);
 
 	drm_framebuffer_cleanup(fb);
 	drm_gem_object_unreference_unlocked(intel_fb->obj);
@@ -4533,18 +4936,13 @@
 	.create_handle = intel_user_framebuffer_create_handle,
 };
 
-int intel_framebuffer_create(struct drm_device *dev,
-			     struct drm_mode_fb_cmd *mode_cmd,
-			     struct drm_framebuffer **fb,
-			     struct drm_gem_object *obj)
+int intel_framebuffer_init(struct drm_device *dev,
+			   struct intel_framebuffer *intel_fb,
+			   struct drm_mode_fb_cmd *mode_cmd,
+			   struct drm_gem_object *obj)
 {
-	struct intel_framebuffer *intel_fb;
 	int ret;
 
-	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
-	if (!intel_fb)
-		return -ENOMEM;
-
 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
 	if (ret) {
 		DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -4552,40 +4950,41 @@
 	}
 
 	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
-
 	intel_fb->obj = obj;
-
-	*fb = &intel_fb->base;
-
 	return 0;
 }
 
-
 static struct drm_framebuffer *
 intel_user_framebuffer_create(struct drm_device *dev,
 			      struct drm_file *filp,
 			      struct drm_mode_fb_cmd *mode_cmd)
 {
 	struct drm_gem_object *obj;
-	struct drm_framebuffer *fb;
+	struct intel_framebuffer *intel_fb;
 	int ret;
 
 	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
 	if (!obj)
 		return NULL;
 
-	ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
+	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+	if (!intel_fb)
+		return NULL;
+
+	ret = intel_framebuffer_init(dev, intel_fb,
+				     mode_cmd, obj);
 	if (ret) {
 		drm_gem_object_unreference_unlocked(obj);
+		kfree(intel_fb);
 		return NULL;
 	}
 
-	return fb;
+	return &intel_fb->base;
 }
 
 static const struct drm_mode_config_funcs intel_mode_funcs = {
 	.fb_create = intel_user_framebuffer_create,
-	.fb_changed = intelfb_probe,
+	.output_poll_changed = intel_fb_output_poll_changed,
 };
 
 static struct drm_gem_object *
@@ -4594,7 +4993,7 @@
 	struct drm_gem_object *pwrctx;
 	int ret;
 
-	pwrctx = drm_gem_object_alloc(dev, 4096);
+	pwrctx = i915_gem_alloc_object(dev, 4096);
 	if (!pwrctx) {
 		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
 		return NULL;
@@ -4732,6 +5131,25 @@
 		}
 
 		I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+		/*
+		 * According to the spec, the following bits should be set in
+		 * order to enable memory self-refresh:
+		 * bit 22/21 of 0x42004
+		 * bit 5 of 0x42020
+		 * bit 15 of 0x45000
+		 */
+		if (IS_IRONLAKE(dev)) {
+			I915_WRITE(ILK_DISPLAY_CHICKEN2,
+					(I915_READ(ILK_DISPLAY_CHICKEN2) |
+					ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+			I915_WRITE(ILK_DSPCLK_GATE,
+					(I915_READ(ILK_DSPCLK_GATE) |
+						ILK_DPARB_CLK_GATE));
+			I915_WRITE(DISP_ARB_CTL,
+					(I915_READ(DISP_ARB_CTL) |
+						DISP_FBC_WM_DIS));
+		}
 		return;
 	} else if (IS_G4X(dev)) {
 		uint32_t dspclk_gate;
@@ -4809,8 +5227,7 @@
 	else
 		dev_priv->display.dpms = i9xx_crtc_dpms;
 
-	/* Only mobile has FBC, leave pointers NULL for other chips */
-	if (IS_MOBILE(dev)) {
+	if (I915_HAS_FBC(dev)) {
 		if (IS_GM45(dev)) {
 			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
 			dev_priv->display.enable_fbc = g4x_enable_fbc;
@@ -4847,9 +5264,31 @@
 			i830_get_display_clock_speed;
 
 	/* For FIFO watermark updates */
-	if (HAS_PCH_SPLIT(dev))
-		dev_priv->display.update_wm = NULL;
-	else if (IS_G4X(dev))
+	if (HAS_PCH_SPLIT(dev)) {
+		if (IS_IRONLAKE(dev)) {
+			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+				dev_priv->display.update_wm = ironlake_update_wm;
+			else {
+				DRM_DEBUG_KMS("Failed to get proper latency. "
+					      "Disable CxSR\n");
+				dev_priv->display.update_wm = NULL;
+			}
+		} else
+			dev_priv->display.update_wm = NULL;
+	} else if (IS_PINEVIEW(dev)) {
+		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+					    dev_priv->fsb_freq,
+					    dev_priv->mem_freq)) {
+			DRM_INFO("failed to find known CxSR latency "
+				 "(found fsb freq %d, mem freq %d), "
+				 "disabling CxSR\n",
+				 dev_priv->fsb_freq, dev_priv->mem_freq);
+			/* Disable CxSR and never update its watermark again */
+			pineview_disable_cxsr(dev);
+			dev_priv->display.update_wm = NULL;
+		} else
+			dev_priv->display.update_wm = pineview_update_wm;
+	} else if (IS_G4X(dev))
 		dev_priv->display.update_wm = g4x_update_wm;
 	else if (IS_I965G(dev))
 		dev_priv->display.update_wm = i965_update_wm;
@@ -4923,13 +5362,6 @@
 		    (unsigned long)dev);
 
 	intel_setup_overlay(dev);
-
-	if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
-							dev_priv->fsb_freq,
-							dev_priv->mem_freq))
-		DRM_INFO("failed to find known CxSR latency "
-			 "(found fsb freq %d, mem freq %d), disabling CxSR\n",
-			 dev_priv->fsb_freq, dev_priv->mem_freq);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
@@ -4940,6 +5372,9 @@
 
 	mutex_lock(&dev->struct_mutex);
 
+	drm_kms_helper_poll_fini(dev);
+	intel_fbdev_fini(dev);
+
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		/* Skip inactive CRTCs */
 		if (!crtc->fb)
@@ -4974,14 +5409,29 @@
 }
 
 
-/* current intel driver doesn't take advantage of encoders
-   always give back the encoder for the connector
-*/
-struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+/*
+ * Return the encoder that is currently attached to the connector.
+ */
+struct drm_encoder *intel_attached_encoder(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+	int i;
 
-	return &intel_encoder->enc;
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		obj = drm_mode_object_find(connector->dev,
+                                           connector->encoder_ids[i],
+                                           DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+
+		encoder = obj_to_encoder(obj);
+		return encoder;
+	}
+	return NULL;
 }
 
 /*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 77e40cf..6b1c9a2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -48,8 +48,6 @@
 	uint32_t output_reg;
 	uint32_t DP;
 	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
-	uint32_t save_DP;
-	uint8_t  save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
 	bool has_audio;
 	int dpms_mode;
 	uint8_t link_bw;
@@ -141,7 +139,8 @@
 intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
 	int max_lanes = intel_dp_max_lane_count(intel_encoder);
 
@@ -215,7 +214,7 @@
 {
 	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	uint32_t output_reg = dp_priv->output_reg;
-	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_device *dev = intel_encoder->enc.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t ch_ctl = output_reg + 0x10;
 	uint32_t ch_data = ch_ctl + 4;
@@ -224,19 +223,27 @@
 	uint32_t ctl;
 	uint32_t status;
 	uint32_t aux_clock_divider;
-	int try;
+	int try, precharge;
 
 	/* The clock divider is based off the hrawclk,
 	 * and would like to run at 2MHz. So, take the
 	 * hrawclk value and divide by 2 and use that
 	 */
-	if (IS_eDP(intel_encoder))
-		aux_clock_divider = 225; /* eDP input clock at 450Mhz */
-	else if (HAS_PCH_SPLIT(dev))
+	if (IS_eDP(intel_encoder)) {
+		if (IS_GEN6(dev))
+			aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+		else
+			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
+	} else if (HAS_PCH_SPLIT(dev))
 		aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
 	else
 		aux_clock_divider = intel_hrawclk(dev) / 2;
 
+	if (IS_GEN6(dev))
+		precharge = 3;
+	else
+		precharge = 5;
+
 	/* Must try at least 3 times according to DP spec */
 	for (try = 0; try < 5; try++) {
 		/* Load the send data into the aux channel data registers */
@@ -249,7 +256,7 @@
 		ctl = (DP_AUX_CH_CTL_SEND_BUSY |
 		       DP_AUX_CH_CTL_TIME_OUT_400us |
 		       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-		       (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+		       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
 		       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
 		       DP_AUX_CH_CTL_DONE |
 		       DP_AUX_CH_CTL_TIME_OUT_ERROR |
@@ -465,7 +472,8 @@
 }
 
 static int
-intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
+intel_dp_i2c_init(struct intel_encoder *intel_encoder,
+		  struct intel_connector *intel_connector, const char *name)
 {
 	struct intel_dp_priv   *dp_priv = intel_encoder->dev_priv;
 
@@ -480,7 +488,7 @@
 	strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
 	dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
 	dp_priv->adapter.algo_data = &dp_priv->algo;
-	dp_priv->adapter.dev.parent = &intel_encoder->base.kdev;
+	dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
 	
 	return i2c_dp_aux_add_bus(&dp_priv->adapter);
 }
@@ -555,7 +563,7 @@
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_connector *connector;
+	struct drm_encoder *encoder;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int lane_count = 4;
@@ -564,13 +572,16 @@
 	/*
 	 * Find the lane count in the intel_encoder private
 	 */
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-		struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+		struct intel_encoder *intel_encoder;
+		struct intel_dp_priv *dp_priv;
 
-		if (!connector->encoder || connector->encoder->crtc != crtc)
+		if (!encoder || encoder->crtc != crtc)
 			continue;
 
+		intel_encoder = enc_to_intel_encoder(encoder);
+		dp_priv = intel_encoder->dev_priv;
+
 		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
 			lane_count = dp_priv->lane_count;
 			break;
@@ -626,16 +637,24 @@
 intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		  struct drm_display_mode *adjusted_mode)
 {
+	struct drm_device *dev = encoder->dev;
 	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	struct drm_crtc *crtc = intel_encoder->enc.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	dp_priv->DP = (DP_LINK_TRAIN_OFF |
-			DP_VOLTAGE_0_4 |
-			DP_PRE_EMPHASIS_0 |
-			DP_SYNC_VS_HIGH |
-			DP_SYNC_HS_HIGH);
+	dp_priv->DP = (DP_VOLTAGE_0_4 |
+		       DP_PRE_EMPHASIS_0);
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		dp_priv->DP |= DP_SYNC_HS_HIGH;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		dp_priv->DP |= DP_SYNC_VS_HIGH;
+
+	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+		dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
+	else
+		dp_priv->DP |= DP_LINK_TRAIN_OFF;
 
 	switch (dp_priv->lane_count) {
 	case 1:
@@ -664,7 +683,8 @@
 		dp_priv->DP |= DP_ENHANCED_FRAMING;
 	}
 
-	if (intel_crtc->pipe == 1)
+	/* CPT DP's pipe select is decided in TRANS_DP_CTL */
+	if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
 		dp_priv->DP |= DP_PIPEB_SELECT;
 
 	if (IS_eDP(intel_encoder)) {
@@ -704,7 +724,7 @@
 {
 	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t dp_reg = I915_READ(dp_priv->output_reg);
 
@@ -749,20 +769,6 @@
 	return link_status[r - DP_LANE0_1_STATUS];
 }
 
-static void
-intel_dp_save(struct drm_connector *connector)
-{
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct drm_device *dev = intel_encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-
-	dp_priv->save_DP = I915_READ(dp_priv->output_reg);
-	intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET,
-				 dp_priv->save_link_configuration,
-				 sizeof (dp_priv->save_link_configuration));
-}
-
 static uint8_t
 intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
 				 int lane)
@@ -892,6 +898,25 @@
 	return signal_levels;
 }
 
+/* Gen6's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen6_edp_signal_levels(uint8_t train_set)
+{
+	switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+		return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
+		return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+	}
+}
+
 static uint8_t
 intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
 		      int lane)
@@ -948,7 +973,7 @@
 			uint8_t train_set[4],
 			bool first)
 {
-	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_device *dev = intel_encoder->enc.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	int ret;
@@ -974,7 +999,7 @@
 intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
 		    uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
 {
-	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_device *dev = intel_encoder->enc.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	uint8_t	train_set[4];
@@ -985,23 +1010,38 @@
 	bool channel_eq = false;
 	bool first = true;
 	int tries;
+	u32 reg;
 
 	/* Write the link configuration data */
-	intel_dp_aux_native_write(intel_encoder, 0x100,
+	intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
 				  link_configuration, DP_LINK_CONFIGURATION_SIZE);
 
 	DP |= DP_PORT_EN;
-	DP &= ~DP_LINK_TRAIN_MASK;
+	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+		DP &= ~DP_LINK_TRAIN_MASK_CPT;
+	else
+		DP &= ~DP_LINK_TRAIN_MASK;
 	memset(train_set, 0, 4);
 	voltage = 0xff;
 	tries = 0;
 	clock_recovery = false;
 	for (;;) {
 		/* Use train_set[0] to set the voltage and pre emphasis values */
-		uint32_t    signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
-		DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+		uint32_t    signal_levels;
+		if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+			signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+		} else {
+			signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+		}
 
-		if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1,
+		if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+			reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
+		else
+			reg = DP | DP_LINK_TRAIN_PAT_1;
+
+		if (!intel_dp_set_link_train(intel_encoder, reg,
 					     DP_TRAINING_PATTERN_1, train_set, first))
 			break;
 		first = false;
@@ -1041,11 +1081,23 @@
 	channel_eq = false;
 	for (;;) {
 		/* Use train_set[0] to set the voltage and pre emphasis values */
-		uint32_t    signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
-		DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+		uint32_t    signal_levels;
+
+		if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+			signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+		} else {
+			signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+		}
+
+		if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+			reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
+		else
+			reg = DP | DP_LINK_TRAIN_PAT_2;
 
 		/* channel eq pattern */
-		if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2,
+		if (!intel_dp_set_link_train(intel_encoder, reg,
 					     DP_TRAINING_PATTERN_2, train_set,
 					     false))
 			break;
@@ -1068,7 +1120,12 @@
 		++tries;
 	}
 
-	I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
+	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+		reg = DP | DP_LINK_TRAIN_OFF_CPT;
+	else
+		reg = DP | DP_LINK_TRAIN_OFF;
+
+	I915_WRITE(dp_priv->output_reg, reg);
 	POSTING_READ(dp_priv->output_reg);
 	intel_dp_aux_native_write_1(intel_encoder,
 				    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
@@ -1077,7 +1134,7 @@
 static void
 intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
 {
-	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_device *dev = intel_encoder->enc.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
@@ -1090,9 +1147,15 @@
 		udelay(100);
 	}
 
-	DP &= ~DP_LINK_TRAIN_MASK;
-	I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
-	POSTING_READ(dp_priv->output_reg);
+	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
+		DP &= ~DP_LINK_TRAIN_MASK_CPT;
+		I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+		POSTING_READ(dp_priv->output_reg);
+	} else {
+		DP &= ~DP_LINK_TRAIN_MASK;
+		I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+		POSTING_READ(dp_priv->output_reg);
+	}
 
 	udelay(17000);
 
@@ -1102,18 +1165,6 @@
 	POSTING_READ(dp_priv->output_reg);
 }
 
-static void
-intel_dp_restore(struct drm_connector *connector)
-{
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-
-	if (dp_priv->save_DP & DP_PORT_EN)
-		intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration);
-	else
-		intel_dp_link_down(intel_encoder,  dp_priv->save_DP);
-}
-
 /*
  * According to DP spec
  * 5.1.2:
@@ -1144,7 +1195,8 @@
 static enum drm_connector_status
 ironlake_dp_detect(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	enum drm_connector_status status;
 
@@ -1168,8 +1220,9 @@
 static enum drm_connector_status
 intel_dp_detect(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct drm_device *dev = intel_encoder->enc.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	uint32_t temp, bit;
@@ -1180,16 +1233,6 @@
 	if (HAS_PCH_SPLIT(dev))
 		return ironlake_dp_detect(connector);
 
-	temp = I915_READ(PORT_HOTPLUG_EN);
-
-	I915_WRITE(PORT_HOTPLUG_EN,
-	       temp |
-	       DPB_HOTPLUG_INT_EN |
-	       DPC_HOTPLUG_INT_EN |
-	       DPD_HOTPLUG_INT_EN);
-
-	POSTING_READ(PORT_HOTPLUG_EN);
-
 	switch (dp_priv->output_reg) {
 	case DP_B:
 		bit = DPB_HOTPLUG_INT_STATUS;
@@ -1222,15 +1265,16 @@
 
 static int intel_dp_get_modes(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct drm_device *dev = intel_encoder->enc.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
 	/* We should parse the EDID data and find out if it has an audio sink
 	 */
 
-	ret = intel_ddc_get_modes(intel_encoder);
+	ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
 	if (ret)
 		return ret;
 
@@ -1249,13 +1293,9 @@
 static void
 intel_dp_destroy (struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
-	if (intel_encoder->i2c_bus)
-		intel_i2c_destroy(intel_encoder->i2c_bus);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
-	kfree(intel_encoder);
+	kfree(connector);
 }
 
 static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -1268,8 +1308,6 @@
 
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
-	.save = intel_dp_save,
-	.restore = intel_dp_restore,
 	.detect = intel_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = intel_dp_destroy,
@@ -1278,12 +1316,17 @@
 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
 	.get_modes = intel_dp_get_modes,
 	.mode_valid = intel_dp_mode_valid,
-	.best_encoder = intel_best_encoder,
+	.best_encoder = intel_attached_encoder,
 };
 
 static void intel_dp_enc_destroy(struct drm_encoder *encoder)
 {
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+	if (intel_encoder->i2c_bus)
+		intel_i2c_destroy(intel_encoder->i2c_bus);
 	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -1299,12 +1342,35 @@
 		intel_dp_check_link_status(intel_encoder);
 }
 
+/* Return which DP Port should be selected for Transcoder DP control */
+int
+intel_trans_dp_port_sel (struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_encoder *encoder;
+	struct intel_encoder *intel_encoder = NULL;
+
+	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+		if (!encoder || encoder->crtc != crtc)
+			continue;
+
+		intel_encoder = enc_to_intel_encoder(encoder);
+		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+			struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+			return dp_priv->output_reg;
+		}
+	}
+	return -1;
+}
+
 void
 intel_dp_init(struct drm_device *dev, int output_reg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
 	struct intel_encoder *intel_encoder;
+	struct intel_connector *intel_connector;
 	struct intel_dp_priv *dp_priv;
 	const char *name = NULL;
 
@@ -1313,13 +1379,21 @@
 	if (!intel_encoder)
 		return;
 
+	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_encoder);
+		return;
+	}
+
 	dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
 
-	connector = &intel_encoder->base;
+	connector = &intel_connector->base;
 	drm_connector_init(dev, connector, &intel_dp_connector_funcs,
 			   DRM_MODE_CONNECTOR_DisplayPort);
 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+
 	if (output_reg == DP_A)
 		intel_encoder->type = INTEL_OUTPUT_EDP;
 	else
@@ -1349,7 +1423,7 @@
 			 DRM_MODE_ENCODER_TMDS);
 	drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
 
-	drm_mode_connector_attach_encoder(&intel_encoder->base,
+	drm_mode_connector_attach_encoder(&intel_connector->base,
 					  &intel_encoder->enc);
 	drm_sysfs_connector_add(connector);
 
@@ -1378,7 +1452,7 @@
 			break;
 	}
 
-	intel_dp_i2c_init(intel_encoder, name);
+	intel_dp_i2c_init(intel_encoder, intel_connector, name);
 
 	intel_encoder->ddc_bus = &dp_priv->adapter;
 	intel_encoder->hot_plug = intel_dp_hot_plug;
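
An illustrative aside, not part of the patch: the hard-coded dividers in the AUX channel hunk above all target the ~2MHz AUX bit clock the comment asks for: 450MHz/225, 400MHz/200 on SNB eDP, 125MHz/62 on the PCH split. A standalone C sketch of that arithmetic; aux_divider_for() is a made-up helper, only the clock values come from the hunk.

#include <stdio.h>
#include <stdint.h>

/* Made-up helper: pick a divider so source_khz / divider lands near
 * the ~2MHz AUX bit clock named in the comment above.
 */
static uint32_t aux_divider_for(uint32_t source_khz)
{
	return source_khz / 2000;
}

int main(void)
{
	/* Clock values quoted in the hunk: 450MHz eDP, 400MHz SNB eDP,
	 * 125MHz PCH reference clock.
	 */
	uint32_t khz[] = { 450000, 400000, 125000 };
	int i;

	for (i = 0; i < 3; i++)
		printf("%u kHz -> divider %u\n", khz[i], aux_divider_for(khz[i]));
	return 0;
}

Running it reproduces the 225 / 200 / 62 values chosen in intel_dp_aux_ch().
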
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e302537..df931f7 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -96,8 +96,6 @@
 
 
 struct intel_encoder {
-	struct drm_connector base;
-
 	struct drm_encoder enc;
 	int type;
 	struct i2c_adapter *i2c_bus;
@@ -110,6 +108,11 @@
 	int clone_mask;
 };
 
+struct intel_connector {
+	struct drm_connector base;
+	void *dev_priv;
+};
+
 struct intel_crtc;
 struct intel_overlay {
 	struct drm_device *dev;
@@ -149,17 +152,18 @@
 	bool lowfreq_avail;
 	struct intel_overlay *overlay;
 	struct intel_unpin_work *unpin_work;
+	int fdi_lanes;
 };
 
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+#define to_intel_connector(x) container_of(x, struct intel_connector, base)
 #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
 
 struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
 				     const char *name);
 void intel_i2c_destroy(struct i2c_adapter *adapter);
-int intel_ddc_get_modes(struct intel_encoder *intel_encoder);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
 extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
 void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
 void intel_i2c_reset_gmbus(struct drm_device *dev);
@@ -183,7 +187,7 @@
 extern void intel_encoder_prepare (struct drm_encoder *encoder);
 extern void intel_encoder_commit (struct drm_encoder *encoder);
 
-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
 
 extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 						    struct drm_crtc *crtc);
@@ -192,17 +196,16 @@
 extern void intel_wait_for_vblank(struct drm_device *dev);
 extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
 extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+						   struct drm_connector *connector,
 						   struct drm_display_mode *mode,
 						   int *dpms_mode);
 extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+					   struct drm_connector *connector,
 					   int dpms_mode);
 
 extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
 extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
 extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
-extern int intelfb_probe(struct drm_device *dev);
-extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
-extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
 extern void intelfb_restore(void);
 extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 				    u16 blue, int regno);
@@ -212,10 +215,12 @@
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 
-extern int intel_framebuffer_create(struct drm_device *dev,
-				    struct drm_mode_fb_cmd *mode_cmd,
-				    struct drm_framebuffer **fb,
-				    struct drm_gem_object *obj);
+extern int intel_framebuffer_init(struct drm_device *dev,
+				  struct intel_framebuffer *ifb,
+				  struct drm_mode_fb_cmd *mode_cmd,
+				  struct drm_gem_object *obj);
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
 
 extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
 extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
@@ -229,4 +234,6 @@
 				   struct drm_file *file_priv);
 extern int intel_overlay_attrs(struct drm_device *dev, void *data,
 			       struct drm_file *file_priv);
+
+extern void intel_fb_output_poll_changed(struct drm_device *dev);
 #endif /* __INTEL_DRV_H__ */
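
An illustrative aside, not part of the patch: intel_drv.h keeps struct drm_encoder embedded in intel_encoder and moves struct drm_connector into the new intel_connector, recovering the wrapper with container_of() through enc_to_intel_encoder()/to_intel_connector(). A standalone sketch of that embed-and-recover pattern; the struct and macro names below are placeholders, not the driver's.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for the DRM base object. */
struct base_encoder {
	int id;
};

/* Driver wrapper embedding the base object, the way intel_encoder
 * embeds struct drm_encoder in the header above.
 */
struct wrapped_encoder {
	struct base_encoder enc;
	int type;
};

#define enc_to_wrapped(e) container_of(e, struct wrapped_encoder, enc)

int main(void)
{
	struct wrapped_encoder w = { .enc = { .id = 7 }, .type = 3 };
	struct base_encoder *base = &w.enc;	/* what core code hands back */

	/* Recover the driver-private wrapper from the base pointer. */
	printf("type = %d\n", enc_to_wrapped(base)->type);
	return 0;
}
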
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ebf213c..227feca 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -96,39 +96,11 @@
 	}
 }
 
-static void intel_dvo_save(struct drm_connector *connector)
-{
-	struct drm_i915_private *dev_priv = connector->dev->dev_private;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
-	/* Each output should probably just save the registers it touches,
-	 * but for now, use more overkill.
-	 */
-	dev_priv->saveDVOA = I915_READ(DVOA);
-	dev_priv->saveDVOB = I915_READ(DVOB);
-	dev_priv->saveDVOC = I915_READ(DVOC);
-
-	dvo->dev_ops->save(dvo);
-}
-
-static void intel_dvo_restore(struct drm_connector *connector)
-{
-	struct drm_i915_private *dev_priv = connector->dev->dev_private;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
-	dvo->dev_ops->restore(dvo);
-
-	I915_WRITE(DVOA, dev_priv->saveDVOA);
-	I915_WRITE(DVOB, dev_priv->saveDVOB);
-	I915_WRITE(DVOC, dev_priv->saveDVOC);
-}
-
 static int intel_dvo_mode_valid(struct drm_connector *connector,
 				struct drm_display_mode *mode)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -241,7 +213,8 @@
  */
 static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 
 	return dvo->dev_ops->detect(dvo);
@@ -249,7 +222,8 @@
 
 static int intel_dvo_get_modes(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 
 	/* We should probably have an i2c driver get_modes function for those
@@ -257,7 +231,7 @@
 	 * (TV-out, for example), but for now with just TMDS and LVDS,
 	 * that's not the case.
 	 */
-	intel_ddc_get_modes(intel_encoder);
+	intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
 	if (!list_empty(&connector->probed_modes))
 		return 1;
 
@@ -275,39 +249,11 @@
 
 static void intel_dvo_destroy (struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
-	if (dvo) {
-		if (dvo->dev_ops->destroy)
-			dvo->dev_ops->destroy(dvo);
-		if (dvo->panel_fixed_mode)
-			kfree(dvo->panel_fixed_mode);
-		/* no need, in i830_dvoices[] now */
-		//kfree(dvo);
-	}
-	if (intel_encoder->i2c_bus)
-		intel_i2c_destroy(intel_encoder->i2c_bus);
-	if (intel_encoder->ddc_bus)
-		intel_i2c_destroy(intel_encoder->ddc_bus);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
-	kfree(intel_encoder);
+	kfree(connector);
 }
 
-#ifdef RANDR_GET_CRTC_INTERFACE
-static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-	int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
-
-	return intel_pipe_to_crtc(pScrn, pipe);
-}
-#endif
-
 static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
 	.dpms = intel_dvo_dpms,
 	.mode_fixup = intel_dvo_mode_fixup,
@@ -318,8 +264,6 @@
 
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
-	.save = intel_dvo_save,
-	.restore = intel_dvo_restore,
 	.detect = intel_dvo_detect,
 	.destroy = intel_dvo_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -328,12 +272,26 @@
 static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
 	.mode_valid = intel_dvo_mode_valid,
 	.get_modes = intel_dvo_get_modes,
-	.best_encoder = intel_best_encoder,
+	.best_encoder = intel_attached_encoder,
 };
 
 static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
 {
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+
+	if (dvo) {
+		if (dvo->dev_ops->destroy)
+			dvo->dev_ops->destroy(dvo);
+		if (dvo->panel_fixed_mode)
+			kfree(dvo->panel_fixed_mode);
+	}
+	if (intel_encoder->i2c_bus)
+		intel_i2c_destroy(intel_encoder->i2c_bus);
+	if (intel_encoder->ddc_bus)
+		intel_i2c_destroy(intel_encoder->ddc_bus);
 	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
@@ -352,7 +310,8 @@
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 	uint32_t dvo_reg = dvo->dvo_reg;
 	uint32_t dvo_val = I915_READ(dvo_reg);
@@ -384,6 +343,7 @@
 void intel_dvo_init(struct drm_device *dev)
 {
 	struct intel_encoder *intel_encoder;
+	struct intel_connector *intel_connector;
 	struct intel_dvo_device *dvo;
 	struct i2c_adapter *i2cbus = NULL;
 	int ret = 0;
@@ -393,6 +353,12 @@
 	if (!intel_encoder)
 		return;
 
+	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_encoder);
+		return;
+	}
+
 	/* Set up the DDC bus */
 	intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
 	if (!intel_encoder->ddc_bus)
@@ -400,7 +366,7 @@
 
 	/* Now, try to find a controller */
 	for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
-		struct drm_connector *connector = &intel_encoder->base;
+		struct drm_connector *connector = &intel_connector->base;
 		int gpio;
 
 		dvo = &intel_dvo_devices[i];
@@ -471,7 +437,7 @@
 		drm_encoder_helper_add(&intel_encoder->enc,
 				       &intel_dvo_helper_funcs);
 
-		drm_mode_connector_attach_encoder(&intel_encoder->base,
+		drm_mode_connector_attach_encoder(&intel_connector->base,
 						  &intel_encoder->enc);
 		if (dvo->type == INTEL_DVO_CHIP_LVDS) {
 			/* For our LVDS chipsets, we should hopefully be able
@@ -496,4 +462,5 @@
 		intel_i2c_destroy(i2cbus);
 free_intel:
 	kfree(intel_encoder);
+	kfree(intel_connector);
 }
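
An illustrative aside, not part of the patch: with the connector no longer embedded in intel_encoder, each output init path above makes two allocations and has to unwind both on failure, as the free_intel: label now does. A minimal kernel-style sketch of that shape; the example_* names are placeholders.

#include <linux/slab.h>
#include <linux/errno.h>

struct example_encoder { int type; };
struct example_connector { int type; };

/* Sketch only, not the driver's init: a failure after the first
 * kzalloc() must free it before returning, mirroring the unwind in
 * intel_dvo_init()/intel_dp_init() above.
 */
static int example_output_init(void)
{
	struct example_encoder *enc;
	struct example_connector *conn;

	enc = kzalloc(sizeof(*enc), GFP_KERNEL);
	if (!enc)
		return -ENOMEM;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		kfree(enc);		/* unwind the first allocation */
		return -ENOMEM;
	}

	/* ... init, attach and register both objects here ... */
	return 0;
}
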
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 8a0b3bc..6f53cf7 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -44,9 +44,10 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-struct intelfb_par {
+struct intel_fbdev {
 	struct drm_fb_helper helper;
-	struct intel_framebuffer *intel_fb;
+	struct intel_framebuffer ifb;
+	struct list_head fbdev_list;
 	struct drm_display_mode *our_mode;
 };
 
@@ -54,7 +55,6 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_setcolreg = drm_fb_helper_setcolreg,
 	.fb_fillrect = cfb_fillrect,
 	.fb_copyarea = cfb_copyarea,
 	.fb_imageblit = cfb_imageblit,
@@ -63,62 +63,12 @@
 	.fb_setcmap = drm_fb_helper_setcmap,
 };
 
-static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
-	.gamma_set = intel_crtc_fb_gamma_set,
-	.gamma_get = intel_crtc_fb_gamma_get,
-};
-
-
-/**
- * Currently it is assumed that the old framebuffer is reused.
- *
- * LOCKING
- * caller should hold the mode config lock.
- *
- */
-int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
+static int intelfb_create(struct intel_fbdev *ifbdev,
+			  struct drm_fb_helper_surface_size *sizes)
 {
+	struct drm_device *dev = ifbdev->helper.dev;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
-	struct drm_display_mode *mode = crtc->desired_mode;
-
-	fb = crtc->fb;
-	if (!fb)
-		return 1;
-
-	info = fb->fbdev;
-	if (!info)
-		return 1;
-
-	if (!mode)
-		return 1;
-
-	info->var.xres = mode->hdisplay;
-	info->var.right_margin = mode->hsync_start - mode->hdisplay;
-	info->var.hsync_len = mode->hsync_end - mode->hsync_start;
-	info->var.left_margin = mode->htotal - mode->hsync_end;
-	info->var.yres = mode->vdisplay;
-	info->var.lower_margin = mode->vsync_start - mode->vdisplay;
-	info->var.vsync_len = mode->vsync_end - mode->vsync_start;
-	info->var.upper_margin = mode->vtotal - mode->vsync_end;
-	info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
-	/* avoid overflow */
-	info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-
-	return 0;
-}
-EXPORT_SYMBOL(intelfb_resize);
-
-static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
-			  uint32_t fb_height, uint32_t surface_width,
-			  uint32_t surface_height,
-			  uint32_t surface_depth, uint32_t surface_bpp,
-			  struct drm_framebuffer **fb_p)
-{
-	struct fb_info *info;
-	struct intelfb_par *par;
-	struct drm_framebuffer *fb;
-	struct intel_framebuffer *intel_fb;
 	struct drm_mode_fb_cmd mode_cmd;
 	struct drm_gem_object *fbo = NULL;
 	struct drm_i915_gem_object *obj_priv;
@@ -126,19 +76,19 @@
 	int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
 
 	/* we don't do packed 24bpp */
-	if (surface_bpp == 24)
-		surface_bpp = 32;
+	if (sizes->surface_bpp == 24)
+		sizes->surface_bpp = 32;
 
-	mode_cmd.width = surface_width;
-	mode_cmd.height = surface_height;
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
 
-	mode_cmd.bpp = surface_bpp;
+	mode_cmd.bpp = sizes->surface_bpp;
 	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
-	mode_cmd.depth = surface_depth;
+	mode_cmd.depth = sizes->surface_depth;
 
 	size = mode_cmd.pitch * mode_cmd.height;
 	size = ALIGN(size, PAGE_SIZE);
-	fbo = drm_gem_object_alloc(dev, size);
+	fbo = i915_gem_alloc_object(dev, size);
 	if (!fbo) {
 		DRM_ERROR("failed to allocate framebuffer\n");
 		ret = -ENOMEM;
@@ -157,45 +107,37 @@
 	/* Flush everything out, we'll be doing GTT only from now on */
 	i915_gem_object_set_to_gtt_domain(fbo, 1);
 
-	ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
-	if (ret) {
-		DRM_ERROR("failed to allocate fb.\n");
-		goto out_unpin;
-	}
-
-	list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
-	intel_fb = to_intel_framebuffer(fb);
-	*fb_p = fb;
-
-	info = framebuffer_alloc(sizeof(struct intelfb_par), device);
+	info = framebuffer_alloc(0, device);
 	if (!info) {
 		ret = -ENOMEM;
 		goto out_unpin;
 	}
 
-	par = info->par;
+	info->par = ifbdev;
 
-	par->helper.funcs = &intel_fb_helper_funcs;
-	par->helper.dev = dev;
-	ret = drm_fb_helper_init_crtc_count(&par->helper, 2,
-					    INTELFB_CONN_LIMIT);
-	if (ret)
-		goto out_unref;
+	intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+
+	fb = &ifbdev->ifb.base;
+
+	ifbdev->helper.fb = fb;
+	ifbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "inteldrmfb");
 
 	info->flags = FBINFO_DEFAULT;
-
 	info->fbops = &intelfb_ops;
 
-
 	/* setup aperture base/size for vesafb takeover */
-	info->aperture_base = dev->mode_config.fb_base;
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
+	info->apertures->ranges[0].base = dev->mode_config.fb_base;
 	if (IS_I9XX(dev))
-		info->aperture_size = pci_resource_len(dev->pdev, 2);
+		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
 	else
-		info->aperture_size = pci_resource_len(dev->pdev, 0);
+		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
 
 	info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
 	info->fix.smem_len = size;
@@ -208,12 +150,18 @@
 		ret = -ENOSPC;
 		goto out_unpin;
 	}
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
 	info->screen_size = size;
 
 //	memset(info->screen_base, 0, size);
 
 	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
-	drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	/* FIXME: we really shouldn't expose mmio space at all */
 	info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
@@ -225,14 +173,10 @@
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
 	info->pixmap.scan_align = 1;
 
-	fb->fbdev = info;
-
-	par->intel_fb = intel_fb;
-
-	/* To allow resizeing without swapping buffers */
 	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
-			intel_fb->base.width, intel_fb->base.height,
-			obj_priv->gtt_offset, fbo);
+		      fb->width, fb->height,
+		      obj_priv->gtt_offset, fbo);
+
 
 	mutex_unlock(&dev->struct_mutex);
 	vga_switcheroo_client_fb_set(dev->pdev, info);
@@ -247,35 +191,86 @@
 	return ret;
 }
 
-int intelfb_probe(struct drm_device *dev)
+static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
+					  struct drm_fb_helper_surface_size *sizes)
 {
+	struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
+	int new_fb = 0;
 	int ret;
 
-	DRM_DEBUG_KMS("\n");
-	ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
-	return ret;
+	if (!helper->fb) {
+		ret = intelfb_create(ifbdev, sizes);
+		if (ret)
+			return ret;
+		new_fb = 1;
+	}
+	return new_fb;
 }
-EXPORT_SYMBOL(intelfb_probe);
 
-int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+	.gamma_set = intel_crtc_fb_gamma_set,
+	.gamma_get = intel_crtc_fb_gamma_get,
+	.fb_probe = intel_fb_find_or_create_single,
+};
+
+int intel_fbdev_destroy(struct drm_device *dev,
+			struct intel_fbdev *ifbdev)
 {
 	struct fb_info *info;
+	struct intel_framebuffer *ifb = &ifbdev->ifb;
 
-	if (!fb)
-		return -EINVAL;
-
-	info = fb->fbdev;
-
-	if (info) {
-		struct intelfb_par *par = info->par;
+	if (ifbdev->helper.fbdev) {
+		info = ifbdev->helper.fbdev;
 		unregister_framebuffer(info);
 		iounmap(info->screen_base);
-		if (info->par)
-			drm_fb_helper_free(&par->helper);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
 		framebuffer_release(info);
 	}
 
+	drm_fb_helper_fini(&ifbdev->helper);
+
+	drm_framebuffer_cleanup(&ifb->base);
+	if (ifb->obj)
+		drm_gem_object_unreference_unlocked(ifb->obj);
+
 	return 0;
 }
-EXPORT_SYMBOL(intelfb_remove);
+
+int intel_fbdev_init(struct drm_device *dev)
+{
+	struct intel_fbdev *ifbdev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+	if (!ifbdev)
+		return -ENOMEM;
+
+	dev_priv->fbdev = ifbdev;
+	ifbdev->helper.funcs = &intel_fb_helper_funcs;
+
+	drm_fb_helper_init(dev, &ifbdev->helper, 2,
+			   INTELFB_CONN_LIMIT);
+
+	drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
+	drm_fb_helper_initial_config(&ifbdev->helper, 32);
+	return 0;
+}
+
+void intel_fbdev_fini(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	if (!dev_priv->fbdev)
+		return;
+
+	intel_fbdev_destroy(dev, dev_priv->fbdev);
+	kfree(dev_priv->fbdev);
+	dev_priv->fbdev = NULL;
+}
 MODULE_LICENSE("GPL and additional rights");
+
+void intel_fb_output_poll_changed(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+}
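
An illustrative aside, not part of the patch: the fbdev rewrite hands lifetime management to the generic drm_fb_helper. Init allocates the per-device fbdev, points helper.funcs at the driver table, then runs drm_fb_helper_init(), drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config(); fini reverses it with drm_fb_helper_fini() before freeing. A condensed sketch of that ordering, using only calls visible in the hunk; my_fbdev and the funcs table are placeholders and error handling is abbreviated.

#include <linux/slab.h>
#include "drmP.h"
#include "drm_fb_helper.h"

/* "my_fbdev" stands in for struct intel_fbdev above. */
struct my_fbdev {
	struct drm_fb_helper helper;
};

static struct drm_fb_helper_funcs my_fb_helper_funcs;	/* gamma_set/get, fb_probe */

static int my_fbdev_init(struct drm_device *dev, struct my_fbdev **out)
{
	struct my_fbdev *fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);

	if (!fbdev)
		return -ENOMEM;

	fbdev->helper.funcs = &my_fb_helper_funcs;

	drm_fb_helper_init(dev, &fbdev->helper, 2, 4);	/* 2 crtcs, small conn limit */
	drm_fb_helper_single_add_all_connectors(&fbdev->helper);
	drm_fb_helper_initial_config(&fbdev->helper, 32);	/* 32bpp console */

	*out = fbdev;
	return 0;
}

static void my_fbdev_fini(struct my_fbdev *fbdev)
{
	drm_fb_helper_fini(&fbdev->helper);	/* tear the helper down before freeing */
	kfree(fbdev);
}
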
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 48cade0..65727f0 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -39,7 +39,6 @@
 
 struct intel_hdmi_priv {
 	u32 sdvox_reg;
-	u32 save_SDVOX;
 	bool has_hdmi_sink;
 };
 
@@ -63,8 +62,12 @@
 	if (hdmi_priv->has_hdmi_sink)
 		sdvox |= SDVO_AUDIO_ENABLE;
 
-	if (intel_crtc->pipe == 1)
-		sdvox |= SDVO_PIPE_B_SELECT;
+	if (intel_crtc->pipe == 1) {
+		if (HAS_PCH_CPT(dev))
+			sdvox |= PORT_TRANS_B_SEL_CPT;
+		else
+			sdvox |= SDVO_PIPE_B_SELECT;
+	}
 
 	I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
 	POSTING_READ(hdmi_priv->sdvox_reg);
@@ -106,27 +109,6 @@
 	}
 }
 
-static void intel_hdmi_save(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
-
-	hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
-}
-
-static void intel_hdmi_restore(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
-
-	I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
-	POSTING_READ(hdmi_priv->sdvox_reg);
-}
-
 static int intel_hdmi_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
@@ -151,13 +133,14 @@
 static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
 	struct edid *edid = NULL;
 	enum drm_connector_status status = connector_status_disconnected;
 
 	hdmi_priv->has_hdmi_sink = false;
-	edid = drm_get_edid(&intel_encoder->base,
+	edid = drm_get_edid(connector,
 			    intel_encoder->ddc_bus);
 
 	if (edid) {
@@ -165,7 +148,7 @@
 			status = connector_status_connected;
 			hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
 		}
-		intel_encoder->base.display_info.raw_edid = NULL;
+		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -174,24 +157,21 @@
 
 static int intel_hdmi_get_modes(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 
 	/* We should parse the EDID data and find out if it's an HDMI sink so
 	 * we can send audio to it.
 	 */
 
-	return intel_ddc_get_modes(intel_encoder);
+	return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
 }
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
-	if (intel_encoder->i2c_bus)
-		intel_i2c_destroy(intel_encoder->i2c_bus);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
-	kfree(intel_encoder);
+	kfree(connector);
 }
 
 static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
@@ -204,8 +184,6 @@
 
 static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
-	.save = intel_hdmi_save,
-	.restore = intel_hdmi_restore,
 	.detect = intel_hdmi_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = intel_hdmi_destroy,
@@ -214,12 +192,17 @@
 static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
 	.get_modes = intel_hdmi_get_modes,
 	.mode_valid = intel_hdmi_mode_valid,
-	.best_encoder = intel_best_encoder,
+	.best_encoder = intel_attached_encoder,
 };
 
 static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
 {
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+	if (intel_encoder->i2c_bus)
+		intel_i2c_destroy(intel_encoder->i2c_bus);
 	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -231,21 +214,30 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
 	struct intel_encoder *intel_encoder;
+	struct intel_connector *intel_connector;
 	struct intel_hdmi_priv *hdmi_priv;
 
 	intel_encoder = kcalloc(sizeof(struct intel_encoder) +
 			       sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
 	if (!intel_encoder)
 		return;
+
+	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_encoder);
+		return;
+	}
+
 	hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
 
-	connector = &intel_encoder->base;
+	connector = &intel_connector->base;
 	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
 			   DRM_MODE_CONNECTOR_HDMIA);
 	drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
 
 	intel_encoder->type = INTEL_OUTPUT_HDMI;
 
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
 	connector->interlace_allowed = 0;
 	connector->doublescan_allowed = 0;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
@@ -285,7 +277,7 @@
 			 DRM_MODE_ENCODER_TMDS);
 	drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
 
-	drm_mode_connector_attach_encoder(&intel_encoder->base,
+	drm_mode_connector_attach_encoder(&intel_connector->base,
 					  &intel_encoder->enc);
 	drm_sysfs_connector_add(connector);
 
@@ -303,6 +295,7 @@
 err_connector:
 	drm_connector_cleanup(connector);
 	kfree(intel_encoder);
+	kfree(intel_connector);
 
 	return;
 }
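
An illustrative aside, not part of the patch: HDMI detect now probes EDID against the drm_connector itself rather than the removed intel_encoder->base, and keys the HDMI-sink flag off drm_detect_hdmi_monitor(). A reduced sketch of that flow; example_hdmi_detect() and the bool out-parameter are placeholders standing in for hdmi_priv->has_hdmi_sink, and only calls used in the hunk appear.

#include <linux/slab.h>
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_edid.h"

/* Reduced sketch of the detect flow above; "ddc" is whatever i2c
 * adapter the output owns.
 */
static enum drm_connector_status
example_hdmi_detect(struct drm_connector *connector,
		    struct i2c_adapter *ddc, bool *has_hdmi_sink)
{
	enum drm_connector_status status = connector_status_disconnected;
	struct edid *edid = drm_get_edid(connector, ddc);

	*has_hdmi_sink = false;
	if (edid) {
		status = connector_status_connected;
		*has_hdmi_sink = drm_detect_hdmi_monitor(edid);
		connector->display_info.raw_edid = NULL;	/* as the hunk does */
		kfree(edid);
	}
	return status;
}
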
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b66806a..6a1accd 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -139,75 +139,6 @@
 	/* XXX: We never power down the LVDS pairs. */
 }
 
-static void intel_lvds_save(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
-	u32 pwm_ctl_reg;
-
-	if (HAS_PCH_SPLIT(dev)) {
-		pp_on_reg = PCH_PP_ON_DELAYS;
-		pp_off_reg = PCH_PP_OFF_DELAYS;
-		pp_ctl_reg = PCH_PP_CONTROL;
-		pp_div_reg = PCH_PP_DIVISOR;
-		pwm_ctl_reg = BLC_PWM_CPU_CTL;
-	} else {
-		pp_on_reg = PP_ON_DELAYS;
-		pp_off_reg = PP_OFF_DELAYS;
-		pp_ctl_reg = PP_CONTROL;
-		pp_div_reg = PP_DIVISOR;
-		pwm_ctl_reg = BLC_PWM_CTL;
-	}
-
-	dev_priv->savePP_ON = I915_READ(pp_on_reg);
-	dev_priv->savePP_OFF = I915_READ(pp_off_reg);
-	dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg);
-	dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg);
-	dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg);
-	dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
-				       BACKLIGHT_DUTY_CYCLE_MASK);
-
-	/*
-	 * If the light is off at server startup, just make it full brightness
-	 */
-	if (dev_priv->backlight_duty_cycle == 0)
-		dev_priv->backlight_duty_cycle =
-			intel_lvds_get_max_backlight(dev);
-}
-
-static void intel_lvds_restore(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
-	u32 pwm_ctl_reg;
-
-	if (HAS_PCH_SPLIT(dev)) {
-		pp_on_reg = PCH_PP_ON_DELAYS;
-		pp_off_reg = PCH_PP_OFF_DELAYS;
-		pp_ctl_reg = PCH_PP_CONTROL;
-		pp_div_reg = PCH_PP_DIVISOR;
-		pwm_ctl_reg = BLC_PWM_CPU_CTL;
-	} else {
-		pp_on_reg = PP_ON_DELAYS;
-		pp_off_reg = PP_OFF_DELAYS;
-		pp_ctl_reg = PP_CONTROL;
-		pp_div_reg = PP_DIVISOR;
-		pwm_ctl_reg = BLC_PWM_CTL;
-	}
-
-	I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL);
-	I915_WRITE(pp_on_reg, dev_priv->savePP_ON);
-	I915_WRITE(pp_off_reg, dev_priv->savePP_OFF);
-	I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR);
-	I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL);
-	if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
-		intel_lvds_set_power(dev, true);
-	else
-		intel_lvds_set_power(dev, false);
-}
-
 static int intel_lvds_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
@@ -635,12 +566,13 @@
 static int intel_lvds_get_modes(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
 
 	if (dev_priv->lvds_edid_good) {
-		ret = intel_ddc_get_modes(intel_encoder);
+		ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
 
 		if (ret)
 			return ret;
@@ -717,11 +649,8 @@
 static void intel_lvds_destroy(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (intel_encoder->ddc_bus)
-		intel_i2c_destroy(intel_encoder->ddc_bus);
 	if (dev_priv->lid_notifier.notifier_call)
 		acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
 	drm_sysfs_connector_remove(connector);
@@ -734,13 +663,14 @@
 				   uint64_t value)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_encoder *intel_encoder =
-			to_intel_encoder(connector);
 
 	if (property == dev->mode_config.scaling_mode_property &&
 				connector->encoder) {
 		struct drm_crtc *crtc = connector->encoder->crtc;
+		struct drm_encoder *encoder = connector->encoder;
+		struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 		struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
+
 		if (value == DRM_MODE_SCALE_NONE) {
 			DRM_DEBUG_KMS("no scaling not supported\n");
 			return 0;
@@ -774,13 +704,11 @@
 static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
 	.get_modes = intel_lvds_get_modes,
 	.mode_valid = intel_lvds_mode_valid,
-	.best_encoder = intel_best_encoder,
+	.best_encoder = intel_attached_encoder,
 };
 
 static const struct drm_connector_funcs intel_lvds_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
-	.save = intel_lvds_save,
-	.restore = intel_lvds_restore,
 	.detect = intel_lvds_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_lvds_set_property,
@@ -790,7 +718,12 @@
 
 static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
 {
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+	if (intel_encoder->ddc_bus)
+		intel_i2c_destroy(intel_encoder->ddc_bus);
 	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
@@ -979,6 +912,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *intel_encoder;
+	struct intel_connector *intel_connector;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
 	struct drm_display_mode *scan; /* *modes, *bios_mode; */
@@ -1012,19 +946,27 @@
 		return;
 	}
 
-	connector = &intel_encoder->base;
+	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_encoder);
+		return;
+	}
+
+	connector = &intel_connector->base;
 	encoder = &intel_encoder->enc;
-	drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs,
+	drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
 			   DRM_MODE_CONNECTOR_LVDS);
 
 	drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
 			 DRM_MODE_ENCODER_LVDS);
 
-	drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+	drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
 	intel_encoder->type = INTEL_OUTPUT_LVDS;
 
 	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
 	intel_encoder->crtc_mask = (1 << 1);
+	if (IS_I965G(dev))
+		intel_encoder->crtc_mask |= (1 << 0);
 	drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
 	drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
 	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1039,7 +981,7 @@
 	 * the initial panel fitting mode will be FULL_SCREEN.
 	 */
 
-	drm_connector_attach_property(&intel_encoder->base,
+	drm_connector_attach_property(&intel_connector->base,
 				      dev->mode_config.scaling_mode_property,
 				      DRM_MODE_SCALE_FULLSCREEN);
 	lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
@@ -1067,7 +1009,7 @@
 	 */
 	dev_priv->lvds_edid_good = true;
 
-	if (!intel_ddc_get_modes(intel_encoder))
+	if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
 		dev_priv->lvds_edid_good = false;
 
 	list_for_each_entry(scan, &connector->probed_modes, head) {
@@ -1151,4 +1093,5 @@
 	drm_connector_cleanup(connector);
 	drm_encoder_cleanup(encoder);
 	kfree(intel_encoder);
+	kfree(intel_connector);
 }
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 8e5c83b..4b1fd3d 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -54,9 +54,9 @@
 		}
 	};
 
-	intel_i2c_quirk_set(intel_encoder->base.dev, true);
+	intel_i2c_quirk_set(intel_encoder->enc.dev, true);
 	ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
-	intel_i2c_quirk_set(intel_encoder->base.dev, false);
+	intel_i2c_quirk_set(intel_encoder->enc.dev, false);
 	if (ret == 2)
 		return true;
 
@@ -66,22 +66,23 @@
 /**
  * intel_ddc_get_modes - get modelist from monitor
  * @connector: DRM connector device to use
+ * @adapter: i2c adapter
  *
  * Fetch the EDID information from @connector using the DDC bus.
  */
-int intel_ddc_get_modes(struct intel_encoder *intel_encoder)
+int intel_ddc_get_modes(struct drm_connector *connector,
+			struct i2c_adapter *adapter)
 {
 	struct edid *edid;
 	int ret = 0;
 
-	intel_i2c_quirk_set(intel_encoder->base.dev, true);
-	edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus);
-	intel_i2c_quirk_set(intel_encoder->base.dev, false);
+	intel_i2c_quirk_set(connector->dev, true);
+	edid = drm_get_edid(connector, adapter);
+	intel_i2c_quirk_set(connector->dev, false);
 	if (edid) {
-		drm_mode_connector_update_edid_property(&intel_encoder->base,
-							edid);
-		ret = drm_add_edid_modes(&intel_encoder->base, edid);
-		intel_encoder->base.display_info.raw_edid = NULL;
+		drm_mode_connector_update_edid_property(connector, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 6d524a1..b0e17b0 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -373,7 +373,7 @@
 
 	/* never have the overlay hw on without showing a frame */
 	BUG_ON(!overlay->vid_bo);
-	obj = overlay->vid_bo->obj;
+	obj = &overlay->vid_bo->base;
 
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(obj);
@@ -411,7 +411,7 @@
 
 	switch (overlay->hw_wedged) {
 		case RELEASE_OLD_VID:
-			obj = overlay->old_vid_bo->obj;
+			obj = &overlay->old_vid_bo->base;
 			i915_gem_object_unpin(obj);
 			drm_gem_object_unreference(obj);
 			overlay->old_vid_bo = NULL;
@@ -467,7 +467,7 @@
 	if (ret != 0)
 		return ret;
 
-	obj = overlay->old_vid_bo->obj;
+	obj = &overlay->old_vid_bo->base;
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(obj);
 	overlay->old_vid_bo = NULL;
@@ -1341,7 +1341,7 @@
 		return;
 	overlay->dev = dev;
 
-	reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
+	reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
 	if (!reg_bo)
 		goto out_free;
 	overlay->reg_bo = to_intel_bo(reg_bo);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 87d9536..aba72c4 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,7 +36,18 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include "intel_sdvo_regs.h"
-#include <linux/dmi.h>
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK  (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+                         SDVO_TV_MASK)
+
+#define IS_TV(c)	(c->output_flag & SDVO_TV_MASK)
+#define IS_LVDS(c)	(c->output_flag & SDVO_LVDS_MASK)
+
 
 static char *tv_format_names[] = {
 	"NTSC_M"   , "NTSC_J"  , "NTSC_443",
@@ -86,12 +97,6 @@
 	/* This is for current tv format name */
 	char *tv_format_name;
 
-	/* This contains all current supported TV format */
-	char *tv_format_supported[TV_FORMAT_NUM];
-	int   format_supported_num;
-	struct drm_property *tv_format_property;
-	struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
-
 	/**
 	 * This is set if we treat the device as HDMI, instead of DVI.
 	 */
@@ -112,12 +117,6 @@
 	 */
 	struct drm_display_mode *sdvo_lvds_fixed_mode;
 
-	/**
-	 * Returned SDTV resolutions allowed for the current format, if the
-	 * device reported it.
-	 */
-	struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
-
 	/*
 	 * supported encoding mode, used to determine whether HDMI is
 	 * supported
@@ -130,11 +129,24 @@
 	/* Mac mini hack -- use the same DDC as the analog connector */
 	struct i2c_adapter *analog_ddc_bus;
 
-	int save_sdvo_mult;
-	u16 save_active_outputs;
-	struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
-	struct intel_sdvo_dtd save_output_dtd[16];
-	u32 save_SDVOX;
+};
+
+struct intel_sdvo_connector {
+	/* Mark the type of connector */
+	uint16_t output_flag;
+
+	/* This contains all current supported TV format */
+	char *tv_format_supported[TV_FORMAT_NUM];
+	int   format_supported_num;
+	struct drm_property *tv_format_property;
+	struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
+
+	/**
+	 * Returned SDTV resolutions allowed for the current format, if the
+	 * device reported it.
+	 */
+	struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
+
 	/* add the property for the SDVO-TV */
 	struct drm_property *left_property;
 	struct drm_property *right_property;
@@ -162,7 +174,12 @@
 };
 
 static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder,
+			uint16_t flags);
+static void
+intel_sdvo_tv_create_property(struct drm_connector *connector, int type);
+static void
+intel_sdvo_create_enhance_property(struct drm_connector *connector);
 
 /**
  * Writes the SDVOB or SDVOC with the given value, but always writes both
@@ -171,12 +188,18 @@
  */
 static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
 {
-	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_device *dev = intel_encoder->enc.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_sdvo_priv   *sdvo_priv = intel_encoder->dev_priv;
 	u32 bval = val, cval = val;
 	int i;
 
+	if (sdvo_priv->sdvo_reg == PCH_SDVOB) {
+		I915_WRITE(sdvo_priv->sdvo_reg, val);
+		I915_READ(sdvo_priv->sdvo_reg);
+		return;
+	}
+
 	if (sdvo_priv->sdvo_reg == SDVOB) {
 		cval = I915_READ(SDVOC);
 	} else {
@@ -353,7 +376,8 @@
     SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
 };
 
-#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC")
+#define IS_SDVOB(reg)	(reg == SDVOB || reg == PCH_SDVOB)
+#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC")
 #define SDVO_PRIV(encoder)   ((struct intel_sdvo_priv *) (encoder)->dev_priv)
 
 static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
@@ -563,17 +587,6 @@
 	return true;
 }
 
-static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder,
-					  u16 *outputs)
-{
-	u8 status;
-
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
-	status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs));
-
-	return (status == SDVO_CMD_STATUS_SUCCESS);
-}
-
 static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
 					  u16 outputs)
 {
@@ -646,40 +659,6 @@
 	return (status == SDVO_CMD_STATUS_SUCCESS);
 }
 
-static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd,
-				  struct intel_sdvo_dtd *dtd)
-{
-	u8 status;
-
-	intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0);
-	status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
-					  sizeof(dtd->part1));
-	if (status != SDVO_CMD_STATUS_SUCCESS)
-		return false;
-
-	intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0);
-	status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
-					  sizeof(dtd->part2));
-	if (status != SDVO_CMD_STATUS_SUCCESS)
-		return false;
-
-	return true;
-}
-
-static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder,
-					 struct intel_sdvo_dtd *dtd)
-{
-	return intel_sdvo_get_timing(intel_encoder,
-				     SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
-}
-
-static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder,
-					 struct intel_sdvo_dtd *dtd)
-{
-	return intel_sdvo_get_timing(intel_encoder,
-				     SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
-}
-
 static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
 				  struct intel_sdvo_dtd *dtd)
 {
@@ -767,23 +746,6 @@
 	return false;
 }
 
-static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder)
-{
-	u8 response, status;
-
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
-	status = intel_sdvo_read_response(intel_encoder, &response, 1);
-
-	if (status != SDVO_CMD_STATUS_SUCCESS) {
-		DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
-		return SDVO_CLOCK_RATE_MULT_1X;
-	} else {
-		DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response);
-	}
-
-	return response;
-}
-
 static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
 {
 	u8 status;
@@ -1071,7 +1033,7 @@
 	memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
 			sizeof(format) : sizeof(format_map));
 
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map,
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format,
 			     sizeof(format));
 
 	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
@@ -1101,7 +1063,7 @@
 		/* Set output timings */
 		intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
 		intel_sdvo_set_target_output(intel_encoder,
-					     dev_priv->controlled_output);
+					     dev_priv->attached_output);
 		intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
 
 		/* Set the input timing to the screen. Assume always input 0. */
@@ -1139,7 +1101,7 @@
 				dev_priv->sdvo_lvds_fixed_mode);
 
 		intel_sdvo_set_target_output(intel_encoder,
-					     dev_priv->controlled_output);
+					     dev_priv->attached_output);
 		intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
 
 		/* Set the input timing to the screen. Assume always input 0. */
@@ -1204,7 +1166,7 @@
 	 * channel on the motherboard.  In a two-input device, the first input
 	 * will be SDVOB and the second SDVOC.
 	 */
-	in_out.in0 = sdvo_priv->controlled_output;
+	in_out.in0 = sdvo_priv->attached_output;
 	in_out.in1 = 0;
 
 	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
@@ -1230,7 +1192,7 @@
 	if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
 		/* Set the output timing to the screen */
 		intel_sdvo_set_target_output(intel_encoder,
-					     sdvo_priv->controlled_output);
+					     sdvo_priv->attached_output);
 		intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
 	}
 
@@ -1352,107 +1314,16 @@
 
 		if (0)
 			intel_sdvo_set_encoder_power_state(intel_encoder, mode);
-		intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output);
+		intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output);
 	}
 	return;
 }
 
-static void intel_sdvo_save(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-	int o;
-
-	sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder);
-	intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs);
-
-	if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
-		intel_sdvo_set_target_input(intel_encoder, true, false);
-		intel_sdvo_get_input_timing(intel_encoder,
-					    &sdvo_priv->save_input_dtd_1);
-	}
-
-	if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
-		intel_sdvo_set_target_input(intel_encoder, false, true);
-		intel_sdvo_get_input_timing(intel_encoder,
-					    &sdvo_priv->save_input_dtd_2);
-	}
-
-	for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
-	{
-	        u16  this_output = (1 << o);
-		if (sdvo_priv->caps.output_flags & this_output)
-		{
-			intel_sdvo_set_target_output(intel_encoder, this_output);
-			intel_sdvo_get_output_timing(intel_encoder,
-						     &sdvo_priv->save_output_dtd[o]);
-		}
-	}
-	if (sdvo_priv->is_tv) {
-		/* XXX: Save TV format/enhancements. */
-	}
-
-	sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg);
-}
-
-static void intel_sdvo_restore(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-	int o;
-	int i;
-	bool input1, input2;
-	u8 status;
-
-	intel_sdvo_set_active_outputs(intel_encoder, 0);
-
-	for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
-	{
-		u16  this_output = (1 << o);
-		if (sdvo_priv->caps.output_flags & this_output) {
-			intel_sdvo_set_target_output(intel_encoder, this_output);
-			intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]);
-		}
-	}
-
-	if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
-		intel_sdvo_set_target_input(intel_encoder, true, false);
-		intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1);
-	}
-
-	if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
-		intel_sdvo_set_target_input(intel_encoder, false, true);
-		intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2);
-	}
-
-	intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult);
-
-	if (sdvo_priv->is_tv) {
-		/* XXX: Restore TV format/enhancements. */
-	}
-
-	intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX);
-
-	if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
-	{
-		for (i = 0; i < 2; i++)
-			intel_wait_for_vblank(dev);
-		status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2);
-		if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
-			DRM_DEBUG_KMS("First %s output reported failure to "
-					"sync\n", SDVO_NAME(sdvo_priv));
-	}
-
-	intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs);
-}
-
 static int intel_sdvo_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1490,6 +1361,8 @@
 	return true;
 }
 
+/* Currently unused. */
+#if 0
 struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
 {
 	struct drm_connector *connector = NULL;
@@ -1560,6 +1433,7 @@
 	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
 	intel_sdvo_read_response(intel_encoder, &response, 2);
 }
+#endif
 
 static bool
 intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
@@ -1598,12 +1472,17 @@
 intel_find_analog_connector(struct drm_device *dev)
 {
 	struct drm_connector *connector;
+	struct drm_encoder *encoder;
 	struct intel_encoder *intel_encoder;
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		intel_encoder = to_intel_encoder(connector);
-		if (intel_encoder->type == INTEL_OUTPUT_ANALOG)
-			return connector;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		intel_encoder = enc_to_intel_encoder(encoder);
+		if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
+			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+				if (connector && encoder == intel_attached_encoder(connector))
+					return connector;
+			}
+		}
 	}
 	return NULL;
 }
@@ -1625,15 +1504,17 @@
 }
 
 enum drm_connector_status
-intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
+intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
 	enum drm_connector_status status = connector_status_connected;
 	struct edid *edid = NULL;
 
-	edid = drm_get_edid(&intel_encoder->base,
-			    intel_encoder->ddc_bus);
+	edid = drm_get_edid(connector, intel_encoder->ddc_bus);
 
 	/* This is only applied to SDVO cards with multiple outputs */
 	if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
@@ -1646,8 +1527,7 @@
 		 */
 		while(temp_ddc > 1) {
 			sdvo_priv->ddc_bus = temp_ddc;
-			edid = drm_get_edid(&intel_encoder->base,
-				intel_encoder->ddc_bus);
+			edid = drm_get_edid(connector, intel_encoder->ddc_bus);
 			if (edid) {
 				/*
 				 * When we can get the EDID, maybe it is the
@@ -1664,28 +1544,25 @@
 	/* when there is no edid and no monitor is connected with VGA
 	 * port, try to use the CRT ddc to read the EDID for DVI-connector
 	 */
-	if (edid == NULL &&
-	    sdvo_priv->analog_ddc_bus &&
-	    !intel_analog_is_connected(intel_encoder->base.dev))
-		edid = drm_get_edid(&intel_encoder->base,
-				    sdvo_priv->analog_ddc_bus);
+	if (edid == NULL && sdvo_priv->analog_ddc_bus &&
+	    !intel_analog_is_connected(connector->dev))
+		edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus);
+
 	if (edid != NULL) {
-		/* Don't report the output as connected if it's a DVI-I
-		 * connector with a non-digital EDID coming out.
-		 */
-		if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
-			if (edid->input & DRM_EDID_INPUT_DIGITAL)
-				sdvo_priv->is_hdmi =
-					drm_detect_hdmi_monitor(edid);
-			else
-				status = connector_status_disconnected;
-		}
+		bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+		bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK);
 
-		kfree(edid);
-		intel_encoder->base.display_info.raw_edid = NULL;
+		/* DDC bus is shared, match EDID to connector type */
+		if (is_digital && need_digital)
+			sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+		else if (is_digital != need_digital)
+			status = connector_status_disconnected;
 
-	} else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
+		connector->display_info.raw_edid = NULL;
+	} else
 		status = connector_status_disconnected;
+
+	kfree(edid);
 
 	return status;
 }
@@ -1694,8 +1571,12 @@
 {
 	uint16_t response;
 	u8 status;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+	enum drm_connector_status ret;
 
 	intel_sdvo_write_cmd(intel_encoder,
 			     SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
@@ -1713,24 +1594,41 @@
 	if (response == 0)
 		return connector_status_disconnected;
 
-	if (intel_sdvo_multifunc_encoder(intel_encoder) &&
-		sdvo_priv->attached_output != response) {
-		if (sdvo_priv->controlled_output != response &&
-			intel_sdvo_output_setup(intel_encoder, response) != true)
-			return connector_status_unknown;
-		sdvo_priv->attached_output = response;
+	sdvo_priv->attached_output = response;
+
+	if ((sdvo_connector->output_flag & response) == 0)
+		ret = connector_status_disconnected;
+	else if (response & SDVO_TMDS_MASK)
+		ret = intel_sdvo_hdmi_sink_detect(connector);
+	else
+		ret = connector_status_connected;
+
+	/* May need to update encoder flags, e.g. the TV clock requirement for SDVO TV. */
+	if (ret == connector_status_connected) {
+		sdvo_priv->is_tv = false;
+		sdvo_priv->is_lvds = false;
+		intel_encoder->needs_tv_clock = false;
+
+		if (response & SDVO_TV_MASK) {
+			sdvo_priv->is_tv = true;
+			intel_encoder->needs_tv_clock = true;
+		}
+		if (response & SDVO_LVDS_MASK)
+			sdvo_priv->is_lvds = true;
 	}
-	return intel_sdvo_hdmi_sink_detect(connector, response);
+
+	return ret;
 }
 
 static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	int num_modes;
 
 	/* set the bus switch and get the modes */
-	num_modes = intel_ddc_get_modes(intel_encoder);
+	num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
 
 	/*
 	 * Mac mini hack.  On this device, the DVI-I connector shares one DDC
@@ -1740,17 +1638,10 @@
 	 */
 	if (num_modes == 0 &&
 	    sdvo_priv->analog_ddc_bus &&
-	    !intel_analog_is_connected(intel_encoder->base.dev)) {
-		struct i2c_adapter *digital_ddc_bus;
-
+	    !intel_analog_is_connected(connector->dev)) {
 		/* Switch to the analog ddc bus and try that
 		 */
-		digital_ddc_bus = intel_encoder->ddc_bus;
-		intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus;
-
-		(void) intel_ddc_get_modes(intel_encoder);
-
-		intel_encoder->ddc_bus = digital_ddc_bus;
+		(void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus);
 	}
 }
 
@@ -1821,8 +1712,9 @@
 
 static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 {
-	struct intel_encoder *output = to_intel_encoder(connector);
-	struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	struct intel_sdvo_sdtv_resolution_request tv_res;
 	uint32_t reply = 0, format_map = 0;
 	int i;
@@ -1842,11 +1734,11 @@
 	       sizeof(format_map) ? sizeof(format_map) :
 	       sizeof(struct intel_sdvo_sdtv_resolution_request));
 
-	intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+	intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output);
 
-	intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
 			     &tv_res, sizeof(tv_res));
-	status = intel_sdvo_read_response(output, &reply, 3);
+	status = intel_sdvo_read_response(intel_encoder, &reply, 3);
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return;
 
@@ -1863,7 +1755,8 @@
 
 static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	struct drm_display_mode *newmode;
@@ -1873,7 +1766,7 @@
 	 * Assume that the preferred modes are
 	 * arranged in priority order.
 	 */
-	intel_ddc_get_modes(intel_encoder);
+	intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
 	if (list_empty(&connector->probed_modes) == false)
 		goto end;
 
@@ -1902,12 +1795,12 @@
 
 static int intel_sdvo_get_modes(struct drm_connector *connector)
 {
-	struct intel_encoder *output = to_intel_encoder(connector);
-	struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
 
-	if (sdvo_priv->is_tv)
+	if (IS_TV(sdvo_connector))
 		intel_sdvo_get_tv_modes(connector);
-	else if (sdvo_priv->is_lvds == true)
+	else if (IS_LVDS(sdvo_connector))
 		intel_sdvo_get_lvds_modes(connector);
 	else
 		intel_sdvo_get_ddc_modes(connector);
@@ -1920,11 +1813,11 @@
 static
 void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
 	struct drm_device *dev = connector->dev;
 
-	if (sdvo_priv->is_tv) {
+	if (IS_TV(sdvo_priv)) {
 		if (sdvo_priv->left_property)
 			drm_property_destroy(dev, sdvo_priv->left_property);
 		if (sdvo_priv->right_property)
@@ -1937,8 +1830,6 @@
 			drm_property_destroy(dev, sdvo_priv->hpos_property);
 		if (sdvo_priv->vpos_property)
 			drm_property_destroy(dev, sdvo_priv->vpos_property);
-	}
-	if (sdvo_priv->is_tv) {
 		if (sdvo_priv->saturation_property)
 			drm_property_destroy(dev,
 					sdvo_priv->saturation_property);
@@ -1948,7 +1839,7 @@
 		if (sdvo_priv->hue_property)
 			drm_property_destroy(dev, sdvo_priv->hue_property);
 	}
-	if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+	if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
 		if (sdvo_priv->brightness_property)
 			drm_property_destroy(dev,
 					sdvo_priv->brightness_property);
@@ -1958,31 +1849,17 @@
 
 static void intel_sdvo_destroy(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
 
-	if (intel_encoder->i2c_bus)
-		intel_i2c_destroy(intel_encoder->i2c_bus);
-	if (intel_encoder->ddc_bus)
-		intel_i2c_destroy(intel_encoder->ddc_bus);
-	if (sdvo_priv->analog_ddc_bus)
-		intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
-
-	if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
-		drm_mode_destroy(connector->dev,
-				 sdvo_priv->sdvo_lvds_fixed_mode);
-
-	if (sdvo_priv->tv_format_property)
+	if (sdvo_connector->tv_format_property)
 		drm_property_destroy(connector->dev,
-				     sdvo_priv->tv_format_property);
+				     sdvo_connector->tv_format_property);
 
-	if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
-		intel_sdvo_destroy_enhance_property(connector);
-
+	intel_sdvo_destroy_enhance_property(connector);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
-
-	kfree(intel_encoder);
+	kfree(connector);
 }
 
 static int
@@ -1990,9 +1867,11 @@
 			struct drm_property *property,
 			uint64_t val)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-	struct drm_encoder *encoder = &intel_encoder->enc;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
 	struct drm_crtc *crtc = encoder->crtc;
 	int ret = 0;
 	bool changed = false;
@@ -2003,101 +1882,101 @@
 	if (ret < 0)
 		goto out;
 
-	if (property == sdvo_priv->tv_format_property) {
+	if (property == sdvo_connector->tv_format_property) {
 		if (val >= TV_FORMAT_NUM) {
 			ret = -EINVAL;
 			goto out;
 		}
 		if (sdvo_priv->tv_format_name ==
-		    sdvo_priv->tv_format_supported[val])
+		    sdvo_connector->tv_format_supported[val])
 			goto out;
 
-		sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val];
+		sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val];
 		changed = true;
 	}
 
-	if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+	if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) {
 		cmd = 0;
 		temp_value = val;
-		if (sdvo_priv->left_property == property) {
+		if (sdvo_connector->left_property == property) {
 			drm_connector_property_set_value(connector,
-				sdvo_priv->right_property, val);
-			if (sdvo_priv->left_margin == temp_value)
+				sdvo_connector->right_property, val);
+			if (sdvo_connector->left_margin == temp_value)
 				goto out;
 
-			sdvo_priv->left_margin = temp_value;
-			sdvo_priv->right_margin = temp_value;
-			temp_value = sdvo_priv->max_hscan -
-					sdvo_priv->left_margin;
+			sdvo_connector->left_margin = temp_value;
+			sdvo_connector->right_margin = temp_value;
+			temp_value = sdvo_connector->max_hscan -
+					sdvo_connector->left_margin;
 			cmd = SDVO_CMD_SET_OVERSCAN_H;
-		} else if (sdvo_priv->right_property == property) {
+		} else if (sdvo_connector->right_property == property) {
 			drm_connector_property_set_value(connector,
-				sdvo_priv->left_property, val);
-			if (sdvo_priv->right_margin == temp_value)
+				sdvo_connector->left_property, val);
+			if (sdvo_connector->right_margin == temp_value)
 				goto out;
 
-			sdvo_priv->left_margin = temp_value;
-			sdvo_priv->right_margin = temp_value;
-			temp_value = sdvo_priv->max_hscan -
-				sdvo_priv->left_margin;
+			sdvo_connector->left_margin = temp_value;
+			sdvo_connector->right_margin = temp_value;
+			temp_value = sdvo_connector->max_hscan -
+				sdvo_connector->left_margin;
 			cmd = SDVO_CMD_SET_OVERSCAN_H;
-		} else if (sdvo_priv->top_property == property) {
+		} else if (sdvo_connector->top_property == property) {
 			drm_connector_property_set_value(connector,
-				sdvo_priv->bottom_property, val);
-			if (sdvo_priv->top_margin == temp_value)
+				sdvo_connector->bottom_property, val);
+			if (sdvo_connector->top_margin == temp_value)
 				goto out;
 
-			sdvo_priv->top_margin = temp_value;
-			sdvo_priv->bottom_margin = temp_value;
-			temp_value = sdvo_priv->max_vscan -
-					sdvo_priv->top_margin;
+			sdvo_connector->top_margin = temp_value;
+			sdvo_connector->bottom_margin = temp_value;
+			temp_value = sdvo_connector->max_vscan -
+					sdvo_connector->top_margin;
 			cmd = SDVO_CMD_SET_OVERSCAN_V;
-		} else if (sdvo_priv->bottom_property == property) {
+		} else if (sdvo_connector->bottom_property == property) {
 			drm_connector_property_set_value(connector,
-				sdvo_priv->top_property, val);
-			if (sdvo_priv->bottom_margin == temp_value)
+				sdvo_connector->top_property, val);
+			if (sdvo_connector->bottom_margin == temp_value)
 				goto out;
-			sdvo_priv->top_margin = temp_value;
-			sdvo_priv->bottom_margin = temp_value;
-			temp_value = sdvo_priv->max_vscan -
-					sdvo_priv->top_margin;
+			sdvo_connector->top_margin = temp_value;
+			sdvo_connector->bottom_margin = temp_value;
+			temp_value = sdvo_connector->max_vscan -
+					sdvo_connector->top_margin;
 			cmd = SDVO_CMD_SET_OVERSCAN_V;
-		} else if (sdvo_priv->hpos_property == property) {
-			if (sdvo_priv->cur_hpos == temp_value)
+		} else if (sdvo_connector->hpos_property == property) {
+			if (sdvo_connector->cur_hpos == temp_value)
 				goto out;
 
 			cmd = SDVO_CMD_SET_POSITION_H;
-			sdvo_priv->cur_hpos = temp_value;
-		} else if (sdvo_priv->vpos_property == property) {
-			if (sdvo_priv->cur_vpos == temp_value)
+			sdvo_connector->cur_hpos = temp_value;
+		} else if (sdvo_connector->vpos_property == property) {
+			if (sdvo_connector->cur_vpos == temp_value)
 				goto out;
 
 			cmd = SDVO_CMD_SET_POSITION_V;
-			sdvo_priv->cur_vpos = temp_value;
-		} else if (sdvo_priv->saturation_property == property) {
-			if (sdvo_priv->cur_saturation == temp_value)
+			sdvo_connector->cur_vpos = temp_value;
+		} else if (sdvo_connector->saturation_property == property) {
+			if (sdvo_connector->cur_saturation == temp_value)
 				goto out;
 
 			cmd = SDVO_CMD_SET_SATURATION;
-			sdvo_priv->cur_saturation = temp_value;
-		} else if (sdvo_priv->contrast_property == property) {
-			if (sdvo_priv->cur_contrast == temp_value)
+			sdvo_connector->cur_saturation = temp_value;
+		} else if (sdvo_connector->contrast_property == property) {
+			if (sdvo_connector->cur_contrast == temp_value)
 				goto out;
 
 			cmd = SDVO_CMD_SET_CONTRAST;
-			sdvo_priv->cur_contrast = temp_value;
-		} else if (sdvo_priv->hue_property == property) {
-			if (sdvo_priv->cur_hue == temp_value)
+			sdvo_connector->cur_contrast = temp_value;
+		} else if (sdvo_connector->hue_property == property) {
+			if (sdvo_connector->cur_hue == temp_value)
 				goto out;
 
 			cmd = SDVO_CMD_SET_HUE;
-			sdvo_priv->cur_hue = temp_value;
-		} else if (sdvo_priv->brightness_property == property) {
-			if (sdvo_priv->cur_brightness == temp_value)
+			sdvo_connector->cur_hue = temp_value;
+		} else if (sdvo_connector->brightness_property == property) {
+			if (sdvo_connector->cur_brightness == temp_value)
 				goto out;
 
 			cmd = SDVO_CMD_SET_BRIGHTNESS;
-			sdvo_priv->cur_brightness = temp_value;
+			sdvo_connector->cur_brightness = temp_value;
 		}
 		if (cmd) {
 			intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
@@ -2127,8 +2006,6 @@
 
 static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
-	.save = intel_sdvo_save,
-	.restore = intel_sdvo_restore,
 	.detect = intel_sdvo_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_sdvo_set_property,
@@ -2138,12 +2015,27 @@
 static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
 	.get_modes = intel_sdvo_get_modes,
 	.mode_valid = intel_sdvo_mode_valid,
-	.best_encoder = intel_best_encoder,
+	.best_encoder = intel_attached_encoder,
 };
 
 static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
 {
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+
+	if (intel_encoder->i2c_bus)
+		intel_i2c_destroy(intel_encoder->i2c_bus);
+	if (intel_encoder->ddc_bus)
+		intel_i2c_destroy(intel_encoder->ddc_bus);
+	if (sdvo_priv->analog_ddc_bus)
+		intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+
+	if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
+		drm_mode_destroy(encoder->dev,
+				 sdvo_priv->sdvo_lvds_fixed_mode);
+
 	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
@@ -2159,49 +2051,29 @@
  * outputs, then LVDS outputs.
  */
 static void
-intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
+intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+			  struct intel_sdvo_priv *sdvo, u32 reg)
 {
-	uint16_t mask = 0;
-	unsigned int num_bits;
+	struct sdvo_device_mapping *mapping;
 
-	/* Make a mask of outputs less than or equal to our own priority in the
-	 * list.
-	 */
-	switch (dev_priv->controlled_output) {
-	case SDVO_OUTPUT_LVDS1:
-		mask |= SDVO_OUTPUT_LVDS1;
-	case SDVO_OUTPUT_LVDS0:
-		mask |= SDVO_OUTPUT_LVDS0;
-	case SDVO_OUTPUT_TMDS1:
-		mask |= SDVO_OUTPUT_TMDS1;
-	case SDVO_OUTPUT_TMDS0:
-		mask |= SDVO_OUTPUT_TMDS0;
-	case SDVO_OUTPUT_RGB1:
-		mask |= SDVO_OUTPUT_RGB1;
-	case SDVO_OUTPUT_RGB0:
-		mask |= SDVO_OUTPUT_RGB0;
-		break;
-	}
+	if (IS_SDVOB(reg))
+		mapping = &(dev_priv->sdvo_mappings[0]);
+	else
+		mapping = &(dev_priv->sdvo_mappings[1]);
 
-	/* Count bits to find what number we are in the priority list. */
-	mask &= dev_priv->caps.output_flags;
-	num_bits = hweight16(mask);
-	if (num_bits > 3) {
-		/* if more than 3 outputs, default to DDC bus 3 for now */
-		num_bits = 3;
-	}
-
-	/* Corresponds to SDVO_CONTROL_BUS_DDCx */
-	dev_priv->ddc_bus = 1 << num_bits;
+	sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
 }
 
 static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output)
+intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device)
 {
 	struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
 	uint8_t status;
 
-	intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+	if (device == 0)
+		intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0);
+	else
+		intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1);
 
 	intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
 	status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
@@ -2214,15 +2086,13 @@
 intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
 {
 	struct drm_device *dev = chan->drm_dev;
-	struct drm_connector *connector;
+	struct drm_encoder *encoder;
 	struct intel_encoder *intel_encoder = NULL;
 
-	list_for_each_entry(connector,
-			&dev->mode_config.connector_list, head) {
-		if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) {
-			intel_encoder = to_intel_encoder(connector);
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		intel_encoder = enc_to_intel_encoder(encoder);
+		if (intel_encoder->ddc_bus == &chan->adapter)
 			break;
-		}
 	}
 	return intel_encoder;
 }
@@ -2259,7 +2129,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct sdvo_device_mapping *my_mapping, *other_mapping;
 
-	if (sdvo_reg == SDVOB) {
+	if (IS_SDVOB(sdvo_reg)) {
 		my_mapping = &dev_priv->sdvo_mappings[0];
 		other_mapping = &dev_priv->sdvo_mappings[1];
 	} else {
@@ -2284,120 +2154,237 @@
 	/* No SDVO device info is found for another DVO port,
 	 * so use mapping assumption we had before BIOS parsing.
 	 */
-	if (sdvo_reg == SDVOB)
+	if (IS_SDVOB(sdvo_reg))
 		return 0x70;
 	else
 		return 0x72;
 }
 
-static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
+static bool
+intel_sdvo_connector_alloc(struct intel_connector **ret)
 {
-	DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
-	return 1;
+	struct intel_connector *intel_connector;
+	struct intel_sdvo_connector *sdvo_connector;
+
+	*ret = kzalloc(sizeof(*intel_connector) +
+			sizeof(*sdvo_connector), GFP_KERNEL);
+	if (!*ret)
+		return false;
+
+	intel_connector = *ret;
+	sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1);
+	intel_connector->dev_priv = sdvo_connector;
+
+	return true;
 }
 
-static struct dmi_system_id intel_sdvo_bad_tv[] = {
-	{
-		.callback = intel_sdvo_bad_tv_callback,
-		.ident = "IntelG45/ICH10R/DME1737",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
-		},
-	},
+static void
+intel_sdvo_connector_create(struct drm_encoder *encoder,
+			     struct drm_connector *connector)
+{
+	drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
+			   connector->connector_type);
 
-	{ }	/* terminating entry */
-};
+	drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+
+	drm_mode_connector_attach_encoder(connector, encoder);
+	drm_sysfs_connector_add(connector);
+}
+
+static bool
+intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device)
+{
+	struct drm_encoder *encoder = &intel_encoder->enc;
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct drm_connector *connector;
+	struct intel_connector *intel_connector;
+	struct intel_sdvo_connector *sdvo_connector;
+
+	if (!intel_sdvo_connector_alloc(&intel_connector))
+		return false;
+
+	sdvo_connector = intel_connector->dev_priv;
+
+	if (device == 0) {
+		sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0;
+		sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+	} else if (device == 1) {
+		sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1;
+		sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+	}
+
+	connector = &intel_connector->base;
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+	if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode)
+		&& intel_sdvo_get_digital_encoding_mode(intel_encoder, device)
+		&& sdvo_priv->is_hdmi) {
+		/* enable hdmi encoding mode if supported */
+		intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
+		intel_sdvo_set_colorimetry(intel_encoder,
+					   SDVO_COLORIMETRY_RGB256);
+		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+	}
+	intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+				    (1 << INTEL_ANALOG_CLONE_BIT);
+
+	intel_sdvo_connector_create(encoder, connector);
+
+	return true;
+}
+
+static bool
+intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type)
+{
+        struct drm_encoder *encoder = &intel_encoder->enc;
+        struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+        struct drm_connector *connector;
+        struct intel_connector *intel_connector;
+        struct intel_sdvo_connector *sdvo_connector;
+
+        if (!intel_sdvo_connector_alloc(&intel_connector))
+                return false;
+
+        connector = &intel_connector->base;
+        encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+        connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+        sdvo_connector = intel_connector->dev_priv;
+
+        sdvo_priv->controlled_output |= type;
+        sdvo_connector->output_flag = type;
+
+        sdvo_priv->is_tv = true;
+        intel_encoder->needs_tv_clock = true;
+        intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+        intel_sdvo_connector_create(encoder, connector);
+
+        intel_sdvo_tv_create_property(connector, type);
+
+        intel_sdvo_create_enhance_property(connector);
+
+        return true;
+}
+
+static bool
+intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device)
+{
+        struct drm_encoder *encoder = &intel_encoder->enc;
+        struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+        struct drm_connector *connector;
+        struct intel_connector *intel_connector;
+        struct intel_sdvo_connector *sdvo_connector;
+
+        if (!intel_sdvo_connector_alloc(&intel_connector))
+                return false;
+
+        connector = &intel_connector->base;
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+        encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+        connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+        sdvo_connector = intel_connector->dev_priv;
+
+        if (device == 0) {
+                sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0;
+                sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+        } else if (device == 1) {
+                sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1;
+                sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+        }
+
+        intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+                                    (1 << INTEL_ANALOG_CLONE_BIT);
+
+        intel_sdvo_connector_create(encoder, connector);
+        return true;
+}
+
+static bool
+intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device)
+{
+        struct drm_encoder *encoder = &intel_encoder->enc;
+        struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+        struct drm_connector *connector;
+        struct intel_connector *intel_connector;
+        struct intel_sdvo_connector *sdvo_connector;
+
+        if (!intel_sdvo_connector_alloc(&intel_connector))
+                return false;
+
+        connector = &intel_connector->base;
+        encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+        connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+        sdvo_connector = intel_connector->dev_priv;
+
+        sdvo_priv->is_lvds = true;
+
+        if (device == 0) {
+                sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0;
+                sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+        } else if (device == 1) {
+                sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1;
+                sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+        }
+
+        intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+                                    (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+
+        intel_sdvo_connector_create(encoder, connector);
+        intel_sdvo_create_enhance_property(connector);
+        return true;
+}
 
 static bool
 intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
 {
-	struct drm_connector *connector = &intel_encoder->base;
-	struct drm_encoder *encoder = &intel_encoder->enc;
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-	bool ret = true, registered = false;
 
 	sdvo_priv->is_tv = false;
 	intel_encoder->needs_tv_clock = false;
 	sdvo_priv->is_lvds = false;
 
-	if (device_is_registered(&connector->kdev)) {
-		drm_sysfs_connector_remove(connector);
-		registered = true;
-	}
+	/* SDVO outputs: an XXX1 function block may only exist if the corresponding XXX0 block is present. */
 
-	if (flags &
-	    (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
-		if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
-			sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
-		else
-			sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
+	if (flags & SDVO_OUTPUT_TMDS0)
+		if (!intel_sdvo_dvi_init(intel_encoder, 0))
+			return false;
 
-		encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
-		connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+	if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+		if (!intel_sdvo_dvi_init(intel_encoder, 1))
+			return false;
 
-		if (intel_sdvo_get_supp_encode(intel_encoder,
-					       &sdvo_priv->encode) &&
-		    intel_sdvo_get_digital_encoding_mode(intel_encoder) &&
-		    sdvo_priv->is_hdmi) {
-			/* enable hdmi encoding mode if supported */
-			intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
-			intel_sdvo_set_colorimetry(intel_encoder,
-						   SDVO_COLORIMETRY_RGB256);
-			connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
-			intel_encoder->clone_mask =
-					(1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-					(1 << INTEL_ANALOG_CLONE_BIT);
-		}
-	} else if ((flags & SDVO_OUTPUT_SVID0) &&
-		   !dmi_check_system(intel_sdvo_bad_tv)) {
+	/* TV has no XXX1 function block */
+	if (flags & SDVO_OUTPUT_SVID0)
+		if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0))
+			return false;
 
-		sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
-		encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
-		connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
-		sdvo_priv->is_tv = true;
-		intel_encoder->needs_tv_clock = true;
-		intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
-	} else if (flags & SDVO_OUTPUT_RGB0) {
+	if (flags & SDVO_OUTPUT_CVBS0)
+		if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0))
+			return false;
 
-		sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
-		encoder->encoder_type = DRM_MODE_ENCODER_DAC;
-		connector->connector_type = DRM_MODE_CONNECTOR_VGA;
-		intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-					(1 << INTEL_ANALOG_CLONE_BIT);
-	} else if (flags & SDVO_OUTPUT_RGB1) {
+	if (flags & SDVO_OUTPUT_RGB0)
+		if (!intel_sdvo_analog_init(intel_encoder, 0))
+			return false;
 
-		sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
-		encoder->encoder_type = DRM_MODE_ENCODER_DAC;
-		connector->connector_type = DRM_MODE_CONNECTOR_VGA;
-		intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-					(1 << INTEL_ANALOG_CLONE_BIT);
-	} else if (flags & SDVO_OUTPUT_CVBS0) {
+	if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+		if (!intel_sdvo_analog_init(intel_encoder, 1))
+			return false;
 
-		sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
-		encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
-		connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
-		sdvo_priv->is_tv = true;
-		intel_encoder->needs_tv_clock = true;
-		intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
-	} else if (flags & SDVO_OUTPUT_LVDS0) {
+	if (flags & SDVO_OUTPUT_LVDS0)
+		if (!intel_sdvo_lvds_init(intel_encoder, 0))
+			return false;
 
-		sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
-		encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
-		connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
-		sdvo_priv->is_lvds = true;
-		intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
-					(1 << INTEL_SDVO_LVDS_CLONE_BIT);
-	} else if (flags & SDVO_OUTPUT_LVDS1) {
+	if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+		if (!intel_sdvo_lvds_init(intel_encoder, 1))
+			return false;
 
-		sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
-		encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
-		connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
-		sdvo_priv->is_lvds = true;
-		intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
-					(1 << INTEL_SDVO_LVDS_CLONE_BIT);
-	} else {
-
+	if ((flags & SDVO_OUTPUT_MASK) == 0) {
 		unsigned char bytes[2];
 
 		sdvo_priv->controlled_output = 0;
@@ -2405,28 +2392,25 @@
 		DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
 			      SDVO_NAME(sdvo_priv),
 			      bytes[0], bytes[1]);
-		ret = false;
+		return false;
 	}
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 
-	if (ret && registered)
-		ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
-
-
-	return ret;
-
+	return true;
 }
 
-static void intel_sdvo_tv_create_property(struct drm_connector *connector)
+static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type)
 {
-      struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
 	struct intel_sdvo_tv_format format;
 	uint32_t format_map, i;
 	uint8_t status;
 
-	intel_sdvo_set_target_output(intel_encoder,
-				     sdvo_priv->controlled_output);
+	intel_sdvo_set_target_output(intel_encoder, type);
 
 	intel_sdvo_write_cmd(intel_encoder,
 			     SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
@@ -2441,35 +2425,37 @@
 	if (format_map == 0)
 		return;
 
-	sdvo_priv->format_supported_num = 0;
+	sdvo_connector->format_supported_num = 0;
 	for (i = 0 ; i < TV_FORMAT_NUM; i++)
 		if (format_map & (1 << i)) {
-			sdvo_priv->tv_format_supported
-			[sdvo_priv->format_supported_num++] =
+			sdvo_connector->tv_format_supported
+			[sdvo_connector->format_supported_num++] =
 			tv_format_names[i];
 		}
 
 
-	sdvo_priv->tv_format_property =
+	sdvo_connector->tv_format_property =
 			drm_property_create(
 				connector->dev, DRM_MODE_PROP_ENUM,
-				"mode", sdvo_priv->format_supported_num);
+				"mode", sdvo_connector->format_supported_num);
 
-	for (i = 0; i < sdvo_priv->format_supported_num; i++)
+	for (i = 0; i < sdvo_connector->format_supported_num; i++)
 		drm_property_add_enum(
-				sdvo_priv->tv_format_property, i,
-				i, sdvo_priv->tv_format_supported[i]);
+				sdvo_connector->tv_format_property, i,
+				i, sdvo_connector->tv_format_supported[i]);
 
-	sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0];
+	sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0];
 	drm_connector_attach_property(
-			connector, sdvo_priv->tv_format_property, 0);
+			connector, sdvo_connector->tv_format_property, 0);
 
 }
 
 static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
 	struct intel_sdvo_enhancements_reply sdvo_data;
 	struct drm_device *dev = connector->dev;
 	uint8_t status;
@@ -2488,7 +2474,7 @@
 		DRM_DEBUG_KMS("No enhancement is supported\n");
 		return;
 	}
-	if (sdvo_priv->is_tv) {
+	if (IS_TV(sdvo_priv)) {
 		/* when horizontal overscan is supported, Add the left/right
 		 * property
 		 */
@@ -2636,8 +2622,6 @@
 					"default %d, current %d\n",
 					data_value[0], data_value[1], response);
 		}
-	}
-	if (sdvo_priv->is_tv) {
 		if (sdvo_data.saturation) {
 			intel_sdvo_write_cmd(intel_encoder,
 				SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
@@ -2733,7 +2717,7 @@
 					data_value[0], data_value[1], response);
 		}
 	}
-	if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+	if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
 		if (sdvo_data.brightness) {
 			intel_sdvo_write_cmd(intel_encoder,
 				SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
@@ -2773,12 +2757,11 @@
 bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_connector *connector;
 	struct intel_encoder *intel_encoder;
 	struct intel_sdvo_priv *sdvo_priv;
-
 	u8 ch[0x40];
 	int i;
+	u32 i2c_reg, ddc_reg, analog_ddc_reg;
 
 	intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
 	if (!intel_encoder) {
@@ -2791,11 +2774,21 @@
 	intel_encoder->dev_priv = sdvo_priv;
 	intel_encoder->type = INTEL_OUTPUT_SDVO;
 
+	if (HAS_PCH_SPLIT(dev)) {
+		i2c_reg = PCH_GPIOE;
+		ddc_reg = PCH_GPIOE;
+		analog_ddc_reg = PCH_GPIOA;
+	} else {
+		i2c_reg = GPIOE;
+		ddc_reg = GPIOE;
+		analog_ddc_reg = GPIOA;
+	}
+
 	/* setup the DDC bus. */
-	if (sdvo_reg == SDVOB)
-		intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+	if (IS_SDVOB(sdvo_reg))
+		intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
 	else
-		intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+		intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
 
 	if (!intel_encoder->i2c_bus)
 		goto err_inteloutput;
@@ -2809,20 +2802,20 @@
 	for (i = 0; i < 0x40; i++) {
 		if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
 			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
-					sdvo_reg == SDVOB ? 'B' : 'C');
+				      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
 			goto err_i2c;
 		}
 	}
 
 	/* setup the DDC bus. */
-	if (sdvo_reg == SDVOB) {
-		intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
-		sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+	if (IS_SDVOB(sdvo_reg)) {
+		intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
+		sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
 						"SDVOB/VGA DDC BUS");
 		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
 	} else {
-		intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
-		sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+		intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
+		sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
 						"SDVOC/VGA DDC BUS");
 		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
 	}
@@ -2833,41 +2826,21 @@
 	/* Wrap with our custom algo which switches to DDC mode */
 	intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
 
+	/* encoder type will be decided later */
+	drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
+	drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
+
 	/* In default case sdvo lvds is false */
 	intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
 
 	if (intel_sdvo_output_setup(intel_encoder,
 				    sdvo_priv->caps.output_flags) != true) {
 		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
-			  sdvo_reg == SDVOB ? 'B' : 'C');
+			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
 		goto err_i2c;
 	}
 
-
-	connector = &intel_encoder->base;
-	drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
-			   connector->connector_type);
-
-	drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
-	connector->interlace_allowed = 0;
-	connector->doublescan_allowed = 0;
-	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-
-	drm_encoder_init(dev, &intel_encoder->enc,
-			&intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type);
-
-	drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
-
-	drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
-	if (sdvo_priv->is_tv)
-		intel_sdvo_tv_create_property(connector);
-
-	if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
-		intel_sdvo_create_enhance_property(connector);
-
-	drm_sysfs_connector_add(connector);
-
-	intel_sdvo_select_ddc_bus(sdvo_priv);
+	intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg);
 
 	/* Set the input timing to the screen. Assume always input 0. */
 	intel_sdvo_set_target_input(intel_encoder, true, false);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d7d39b2..6d553c2 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -916,143 +916,6 @@
 	}
 }
 
-static void
-intel_tv_save(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-	int i;
-
-	tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
-	tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2);
-	tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3);
-	tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1);
-	tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2);
-	tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3);
-	tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4);
-	tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5);
-	tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6);
-	tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7);
-	tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1);
-	tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2);
-	tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3);
-
-	tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y);
-	tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2);
-	tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U);
-	tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2);
-	tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V);
-	tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2);
-	tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS);
-	tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL);
-	tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS);
-	tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE);
-	tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1);
-	tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2);
-	tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3);
-
-	for (i = 0; i < 60; i++)
-		tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2));
-	for (i = 0; i < 60; i++)
-		tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2));
-	for (i = 0; i < 43; i++)
-		tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2));
-	for (i = 0; i < 43; i++)
-		tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2));
-
-	tv_priv->save_TV_DAC = I915_READ(TV_DAC);
-	tv_priv->save_TV_CTL = I915_READ(TV_CTL);
-}
-
-static void
-intel_tv_restore(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-	struct drm_crtc *crtc = connector->encoder->crtc;
-	struct intel_crtc *intel_crtc;
-	int i;
-
-	/* FIXME: No CRTC? */
-	if (!crtc)
-		return;
-
-	intel_crtc = to_intel_crtc(crtc);
-	I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1);
-	I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2);
-	I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3);
-	I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1);
-	I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2);
-	I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3);
-	I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4);
-	I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5);
-	I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6);
-	I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7);
-	I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1);
-	I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2);
-	I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3);
-
-	I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y);
-	I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2);
-	I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U);
-	I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2);
-	I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V);
-	I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2);
-	I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS);
-	I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL);
-
-	{
-		int pipeconf_reg = (intel_crtc->pipe == 0) ?
-			PIPEACONF : PIPEBCONF;
-		int dspcntr_reg = (intel_crtc->plane == 0) ?
-			DSPACNTR : DSPBCNTR;
-		int pipeconf = I915_READ(pipeconf_reg);
-		int dspcntr = I915_READ(dspcntr_reg);
-		int dspbase_reg = (intel_crtc->plane == 0) ?
-			DSPAADDR : DSPBADDR;
-		/* Pipe must be off here */
-		I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
-		/* Flush the plane changes */
-		I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-
-		if (!IS_I9XX(dev)) {
-			/* Wait for vblank for the disable to take effect */
-			intel_wait_for_vblank(dev);
-		}
-
-		I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
-		/* Wait for vblank for the disable to take effect. */
-		intel_wait_for_vblank(dev);
-
-		/* Filter ctl must be set before TV_WIN_SIZE */
-		I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1);
-		I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2);
-		I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3);
-		I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS);
-		I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE);
-		I915_WRITE(pipeconf_reg, pipeconf);
-		I915_WRITE(dspcntr_reg, dspcntr);
-		/* Flush the plane changes */
-		I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-	}
-
-	for (i = 0; i < 60; i++)
-		I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]);
-	for (i = 0; i < 60; i++)
-		I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]);
-	for (i = 0; i < 43; i++)
-		I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]);
-	for (i = 0; i < 43; i++)
-		I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]);
-
-	I915_WRITE(TV_DAC, tv_priv->save_TV_DAC);
-	I915_WRITE(TV_CTL, tv_priv->save_TV_CTL);
-}
-
 static const struct tv_mode *
 intel_tv_mode_lookup (char *tv_format)
 {
@@ -1078,7 +941,8 @@
 static enum drm_mode_status
 intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
 
 	/* Ensure TV refresh is close to desired refresh */
@@ -1441,7 +1305,8 @@
  */
 static void intel_tv_find_better_format(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
 	int i;
@@ -1475,9 +1340,9 @@
 {
 	struct drm_crtc *crtc;
 	struct drm_display_mode mode;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-	struct drm_encoder *encoder = &intel_encoder->enc;
 	int dpms_mode;
 	int type = tv_priv->type;
 
@@ -1487,10 +1352,12 @@
 	if (encoder->crtc && encoder->crtc->enabled) {
 		type = intel_tv_detect_type(encoder->crtc, intel_encoder);
 	} else {
-		crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode);
+		crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+						  &mode, &dpms_mode);
 		if (crtc) {
 			type = intel_tv_detect_type(crtc, intel_encoder);
-			intel_release_load_detect_pipe(intel_encoder, dpms_mode);
+			intel_release_load_detect_pipe(intel_encoder, connector,
+						       dpms_mode);
 		} else
 			type = -1;
 	}
@@ -1525,7 +1392,8 @@
 intel_tv_chose_preferred_modes(struct drm_connector *connector,
 			       struct drm_display_mode *mode_ptr)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
 
 	if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@@ -1550,7 +1418,8 @@
 intel_tv_get_modes(struct drm_connector *connector)
 {
 	struct drm_display_mode *mode_ptr;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
 	int j, count = 0;
 	u64 tmp;
@@ -1604,11 +1473,9 @@
 static void
 intel_tv_destroy (struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
-	kfree(intel_encoder);
+	kfree(connector);
 }
 
 
@@ -1617,9 +1484,9 @@
 		      uint64_t val)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-	struct drm_encoder *encoder = &intel_encoder->enc;
 	struct drm_crtc *crtc = encoder->crtc;
 	int ret = 0;
 	bool changed = false;
@@ -1676,8 +1543,6 @@
 
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
-	.save = intel_tv_save,
-	.restore = intel_tv_restore,
 	.detect = intel_tv_detect,
 	.destroy = intel_tv_destroy,
 	.set_property = intel_tv_set_property,
@@ -1687,12 +1552,15 @@
 static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
 	.mode_valid = intel_tv_mode_valid,
 	.get_modes = intel_tv_get_modes,
-	.best_encoder = intel_best_encoder,
+	.best_encoder = intel_attached_encoder,
 };
 
 static void intel_tv_enc_destroy(struct drm_encoder *encoder)
 {
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
 	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1741,6 +1609,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
 	struct intel_encoder *intel_encoder;
+	struct intel_connector *intel_connector;
 	struct intel_tv_priv *tv_priv;
 	u32 tv_dac_on, tv_dac_off, save_tv_dac;
 	char **tv_format_names;
@@ -1786,7 +1655,13 @@
 		return;
 	}
 
-	connector = &intel_encoder->base;
+	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_encoder);
+		return;
+	}
+
+	connector = &intel_connector->base;
 
 	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
 			   DRM_MODE_CONNECTOR_SVIDEO);
@@ -1794,7 +1669,7 @@
 	drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
 			 DRM_MODE_ENCODER_TVDAC);
 
-	drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+	drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
 	tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
 	intel_encoder->type = INTEL_OUTPUT_TVOUT;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
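
Editorial note, not part of the patch: the intel_tv hunks above stop carving the connector out of the encoder allocation and instead kzalloc a separate intel_connector whose embedded base is handed to drm_connector_init(), with helpers like intel_attached_encoder()/enc_to_intel_encoder() resolving one object from the other. Recovering a wrapper from an embedded DRM base object is typically done with container_of-style pointer arithmetic; a minimal standalone sketch with stub types (names and values invented for illustration):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_connector_stub { int id; };

struct intel_connector_stub {
    struct drm_connector_stub base;   /* embedded base object, first member */
    int extra;
};

int main(void)
{
    struct intel_connector_stub ic = { .base = { .id = 7 }, .extra = 42 };
    struct drm_connector_stub *conn = &ic.base;

    /* recover the wrapper from the embedded base, as the *_to_* helpers do */
    struct intel_connector_stub *back =
        container_of(conn, struct intel_connector_stub, base);

    printf("id=%d extra=%d\n", back->base.id, back->extra);
    return 0;
}

Freeing the wrapper through the base pointer, as intel_tv_destroy() now does with kfree(connector), works because the embedded base sits at the start of the wrapper.
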
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 453df3f..acd31ed 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -22,7 +22,8 @@
              nv50_cursor.o nv50_display.o nv50_fbcon.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
              nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
-             nv17_gpio.o nv50_gpio.o
+             nv17_gpio.o nv50_gpio.o \
+	     nv50_calc.o
 
 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index abc382a..e7e69cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -26,6 +26,7 @@
 #define NV_DEBUG_NOTRACE
 #include "nouveau_drv.h"
 #include "nouveau_hw.h"
+#include "nouveau_encoder.h"
 
 /* these defines are made up */
 #define NV_CIO_CRE_44_HEADA 0x0
@@ -256,6 +257,11 @@
 struct init_tbl_entry {
 	char *name;
 	uint8_t id;
+	/* Return:
+	 *  > 0: success, length of opcode
+	 *    0: success, but abort further parsing of table (INIT_DONE etc)
+	 *  < 0: failure, table parsing will be aborted
+	 */
 	int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
 };
 
@@ -709,6 +715,83 @@
 	return dcb_entry;
 }
 
+static int
+read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
+{
+	uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
+	int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
+	int recordoffset = 0, rdofs = 1, wrofs = 0;
+	uint8_t port_type = 0;
+
+	if (!i2ctable)
+		return -EINVAL;
+
+	if (dcb_version >= 0x30) {
+		if (i2ctable[0] != dcb_version) /* necessary? */
+			NV_WARN(dev,
+				"DCB I2C table version mismatch (%02X vs %02X)\n",
+				i2ctable[0], dcb_version);
+		dcb_i2c_ver = i2ctable[0];
+		headerlen = i2ctable[1];
+		if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
+			i2c_entries = i2ctable[2];
+		else
+			NV_WARN(dev,
+				"DCB I2C table has more entries than indexable "
+				"(%d entries, max %d)\n", i2ctable[2],
+				DCB_MAX_NUM_I2C_ENTRIES);
+		entry_len = i2ctable[3];
+		/* [4] is i2c_default_indices, read in parse_dcb_table() */
+	}
+	/*
+	 * It's your own fault if you call this function on a DCB 1.1 BIOS --
+	 * the test below is for DCB 1.2
+	 */
+	if (dcb_version < 0x14) {
+		recordoffset = 2;
+		rdofs = 0;
+		wrofs = 1;
+	}
+
+	if (index == 0xf)
+		return 0;
+	if (index >= i2c_entries) {
+		NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
+			 index, i2ctable[2]);
+		return -ENOENT;
+	}
+	if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
+		NV_ERROR(dev, "DCB I2C entry invalid\n");
+		return -EINVAL;
+	}
+
+	if (dcb_i2c_ver >= 0x30) {
+		port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
+
+		/*
+		 * Fixup for chips using same address offset for read and
+		 * write.
+		 */
+		if (port_type == 4)	/* seen on C51 */
+			rdofs = wrofs = 1;
+		if (port_type >= 5)	/* G80+ */
+			rdofs = wrofs = 0;
+	}
+
+	if (dcb_i2c_ver >= 0x40) {
+		if (port_type != 5 && port_type != 6)
+			NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
+
+		i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
+	}
+
+	i2c->port_type = port_type;
+	i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
+	i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
+
+	return 0;
+}
+
 static struct nouveau_i2c_chan *
 init_i2c_device_find(struct drm_device *dev, int i2c_index)
 {
@@ -727,6 +810,20 @@
 	}
 	if (i2c_index == 0x80)	/* g80+ */
 		i2c_index = dcb->i2c_default_indices & 0xf;
+	else
+	if (i2c_index == 0x81)
+		i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
+
+	if (i2c_index > DCB_MAX_NUM_I2C_ENTRIES) {
+		NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
+		return NULL;
+	}
+
+	/* Make sure i2c table entry has been parsed, it may not
+	 * have been if this is a bus not referenced by a DCB encoder
+	 */
+	read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+			   i2c_index, &dcb->i2c[i2c_index]);
 
 	return nouveau_i2c_find(dev, i2c_index);
 }
@@ -818,7 +915,7 @@
 		NV_ERROR(bios->dev,
 			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
 			 offset, config, count);
-		return 0;
+		return -EINVAL;
 	}
 
 	configval = ROM32(bios->data[offset + 11 + config * 4]);
@@ -920,7 +1017,7 @@
 		NV_ERROR(bios->dev,
 			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
 			 offset, config, count);
-		return 0;
+		return -EINVAL;
 	}
 
 	freq = ROM16(bios->data[offset + 12 + config * 2]);
@@ -1067,6 +1164,126 @@
 }
 
 static int
+init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_DP_CONDITION   opcode: 0x3A ('')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): "sub" opcode
+	 * offset + 2  (8 bit): unknown
+	 *
+	 */
+
+	struct bit_displayport_encoder_table *dpe = NULL;
+	struct dcb_entry *dcb = bios->display.output;
+	struct drm_device *dev = bios->dev;
+	uint8_t cond = bios->data[offset + 1];
+	int dummy;
+
+	BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond);
+
+	if (!iexec->execute)
+		return 3;
+
+	dpe = nouveau_bios_dp_table(dev, dcb, &dummy);
+	if (!dpe) {
+		NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset);
+		return -EINVAL;
+	}
+
+	switch (cond) {
+	case 0:
+	{
+		struct dcb_connector_table_entry *ent =
+			&bios->dcb.connector.entry[dcb->connector];
+
+		if (ent->type != DCB_CONNECTOR_eDP)
+			iexec->execute = false;
+	}
+		break;
+	case 1:
+	case 2:
+		if (!(dpe->unknown & cond))
+			iexec->execute = false;
+		break;
+	case 5:
+	{
+		struct nouveau_i2c_chan *auxch;
+		int ret;
+
+		auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index);
+		if (!auxch)
+			return -ENODEV;
+
+		ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1);
+		if (ret)
+			return ret;
+
+		if (cond & 1)
+			iexec->execute = false;
+	}
+		break;
+	default:
+		NV_WARN(dev, "0x%04X: unknown INIT_3A op: %d\n", offset, cond);
+		break;
+	}
+
+	if (iexec->execute)
+		BIOSLOG(bios, "0x%04X: continuing to execute\n", offset);
+	else
+		BIOSLOG(bios, "0x%04X: skipping following commands\n", offset);
+
+	return 3;
+}
+
+static int
+init_op_3b(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_3B   opcode: 0x3B ('')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): crtc index
+	 *
+	 */
+
+	uint8_t or = ffs(bios->display.output->or) - 1;
+	uint8_t index = bios->data[offset + 1];
+	uint8_t data;
+
+	if (!iexec->execute)
+		return 2;
+
+	data = bios_idxprt_rd(bios, 0x3d4, index);
+	bios_idxprt_wr(bios, 0x3d4, index, data & ~(1 << or));
+	return 2;
+}
+
+static int
+init_op_3c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_3C   opcode: 0x3C ('')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): crtc index
+	 *
+	 */
+
+	uint8_t or = ffs(bios->display.output->or) - 1;
+	uint8_t index = bios->data[offset + 1];
+	uint8_t data;
+
+	if (!iexec->execute)
+		return 2;
+
+	data = bios_idxprt_rd(bios, 0x3d4, index);
+	bios_idxprt_wr(bios, 0x3d4, index, data | (1 << or));
+	return 2;
+}
+
+static int
 init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
 		      struct init_exec *iexec)
 {
@@ -1170,7 +1387,7 @@
 		NV_ERROR(bios->dev,
 			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
 			 offset, config, count);
-		return 0;
+		return -EINVAL;
 	}
 
 	freq = ROM32(bios->data[offset + 11 + config * 4]);
@@ -1231,12 +1448,11 @@
 	 */
 
 	uint8_t i2c_index = bios->data[offset + 1];
-	uint8_t i2c_address = bios->data[offset + 2];
+	uint8_t i2c_address = bios->data[offset + 2] >> 1;
 	uint8_t count = bios->data[offset + 3];
-	int len = 4 + count * 3;
 	struct nouveau_i2c_chan *chan;
-	struct i2c_msg msg;
-	int i;
+	int len = 4 + count * 3;
+	int ret, i;
 
 	if (!iexec->execute)
 		return len;
@@ -1247,35 +1463,34 @@
 
 	chan = init_i2c_device_find(bios->dev, i2c_index);
 	if (!chan)
-		return 0;
+		return -ENODEV;
 
 	for (i = 0; i < count; i++) {
-		uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
+		uint8_t reg = bios->data[offset + 4 + i * 3];
 		uint8_t mask = bios->data[offset + 5 + i * 3];
 		uint8_t data = bios->data[offset + 6 + i * 3];
-		uint8_t value;
+		union i2c_smbus_data val;
 
-		msg.addr = i2c_address;
-		msg.flags = I2C_M_RD;
-		msg.len = 1;
-		msg.buf = &value;
-		if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
-			return 0;
+		ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+				     I2C_SMBUS_READ, reg,
+				     I2C_SMBUS_BYTE_DATA, &val);
+		if (ret < 0)
+			return ret;
 
 		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
 			      "Mask: 0x%02X, Data: 0x%02X\n",
-			offset, i2c_reg, value, mask, data);
+			offset, reg, val.byte, mask, data);
 
-		value = (value & mask) | data;
+		if (!bios->execute)
+			continue;
 
-		if (bios->execute) {
-			msg.addr = i2c_address;
-			msg.flags = 0;
-			msg.len = 1;
-			msg.buf = &value;
-			if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
-				return 0;
-		}
+		val.byte &= mask;
+		val.byte |= data;
+		ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+				     I2C_SMBUS_WRITE, reg,
+				     I2C_SMBUS_BYTE_DATA, &val);
+		if (ret < 0)
+			return ret;
 	}
 
 	return len;
@@ -1301,12 +1516,11 @@
 	 */
 
 	uint8_t i2c_index = bios->data[offset + 1];
-	uint8_t i2c_address = bios->data[offset + 2];
+	uint8_t i2c_address = bios->data[offset + 2] >> 1;
 	uint8_t count = bios->data[offset + 3];
-	int len = 4 + count * 2;
 	struct nouveau_i2c_chan *chan;
-	struct i2c_msg msg;
-	int i;
+	int len = 4 + count * 2;
+	int ret, i;
 
 	if (!iexec->execute)
 		return len;
@@ -1317,23 +1531,25 @@
 
 	chan = init_i2c_device_find(bios->dev, i2c_index);
 	if (!chan)
-		return 0;
+		return -ENODEV;
 
 	for (i = 0; i < count; i++) {
-		uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
-		uint8_t data = bios->data[offset + 5 + i * 2];
+		uint8_t reg = bios->data[offset + 4 + i * 2];
+		union i2c_smbus_data val;
+
+		val.byte = bios->data[offset + 5 + i * 2];
 
 		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
-			offset, i2c_reg, data);
+			offset, reg, val.byte);
 
-		if (bios->execute) {
-			msg.addr = i2c_address;
-			msg.flags = 0;
-			msg.len = 1;
-			msg.buf = &data;
-			if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
-				return 0;
-		}
+		if (!bios->execute)
+			continue;
+
+		ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+				     I2C_SMBUS_WRITE, reg,
+				     I2C_SMBUS_BYTE_DATA, &val);
+		if (ret < 0)
+			return ret;
 	}
 
 	return len;
@@ -1357,7 +1573,7 @@
 	 */
 
 	uint8_t i2c_index = bios->data[offset + 1];
-	uint8_t i2c_address = bios->data[offset + 2];
+	uint8_t i2c_address = bios->data[offset + 2] >> 1;
 	uint8_t count = bios->data[offset + 3];
 	int len = 4 + count;
 	struct nouveau_i2c_chan *chan;
@@ -1374,7 +1590,7 @@
 
 	chan = init_i2c_device_find(bios->dev, i2c_index);
 	if (!chan)
-		return 0;
+		return -ENODEV;
 
 	for (i = 0; i < count; i++) {
 		data[i] = bios->data[offset + 4 + i];
@@ -1388,7 +1604,7 @@
 		msg.len = count;
 		msg.buf = data;
 		if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
-			return 0;
+			return -EIO;
 	}
 
 	return len;
@@ -1427,7 +1643,7 @@
 
 	reg = get_tmds_index_reg(bios->dev, mlv);
 	if (!reg)
-		return 0;
+		return -EINVAL;
 
 	bios_wr32(bios, reg,
 		  tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
@@ -1471,7 +1687,7 @@
 
 	reg = get_tmds_index_reg(bios->dev, mlv);
 	if (!reg)
-		return 0;
+		return -EINVAL;
 
 	for (i = 0; i < count; i++) {
 		uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
@@ -1946,7 +2162,7 @@
 	uint32_t reg, data;
 
 	if (bios->major_version > 2)
-		return 0;
+		return -ENODEV;
 
 	bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
 		       bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
@@ -2001,7 +2217,7 @@
 	int clock;
 
 	if (bios->major_version > 2)
-		return 0;
+		return -ENODEV;
 
 	clock = ROM16(bios->data[meminitoffs + 4]) * 10;
 	setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
@@ -2034,7 +2250,7 @@
 	uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
 
 	if (bios->major_version > 2)
-		return 0;
+		return -ENODEV;
 
 	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
 			     NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
@@ -2656,7 +2872,7 @@
 		NV_ERROR(bios->dev,
 			 "0x%04X: Zero block length - has the M table "
 			 "been parsed?\n", offset);
-		return 0;
+		return -EINVAL;
 	}
 
 	strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
@@ -2840,14 +3056,14 @@
 
 	if (!bios->display.output) {
 		NV_ERROR(dev, "INIT_AUXCH: no active output\n");
-		return 0;
+		return -EINVAL;
 	}
 
 	auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
 	if (!auxch) {
 		NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
 			 bios->display.output->i2c_index);
-		return 0;
+		return -ENODEV;
 	}
 
 	if (!iexec->execute)
@@ -2860,7 +3076,7 @@
 		ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
 		if (ret) {
 			NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
-			return 0;
+			return ret;
 		}
 
 		data &= bios->data[offset + 0];
@@ -2869,7 +3085,7 @@
 		ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
 		if (ret) {
 			NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
-			return 0;
+			return ret;
 		}
 	}
 
@@ -2899,14 +3115,14 @@
 
 	if (!bios->display.output) {
 		NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
-		return 0;
+		return -EINVAL;
 	}
 
 	auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
 	if (!auxch) {
 		NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
 			 bios->display.output->i2c_index);
-		return 0;
+		return -ENODEV;
 	}
 
 	if (!iexec->execute)
@@ -2917,7 +3133,7 @@
 		ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
 		if (ret) {
 			NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
-			return 0;
+			return ret;
 		}
 	}
 
@@ -2934,6 +3150,9 @@
 	{ "INIT_COPY"                         , 0x37, init_copy                       },
 	{ "INIT_NOT"                          , 0x38, init_not                        },
 	{ "INIT_IO_FLAG_CONDITION"            , 0x39, init_io_flag_condition          },
+	{ "INIT_DP_CONDITION"                 , 0x3A, init_dp_condition               },
+	{ "INIT_OP_3B"                        , 0x3B, init_op_3b                      },
+	{ "INIT_OP_3C"                        , 0x3C, init_op_3c                      },
 	{ "INIT_INDEX_ADDRESS_LATCHED"        , 0x49, init_idx_addr_latched           },
 	{ "INIT_IO_RESTRICT_PLL2"             , 0x4A, init_io_restrict_pll2           },
 	{ "INIT_PLL2"                         , 0x4B, init_pll2                       },
@@ -3001,7 +3220,7 @@
 	 * is changed back to EXECUTE.
 	 */
 
-	int count = 0, i, res;
+	int count = 0, i, ret;
 	uint8_t id;
 
 	/*
@@ -3016,26 +3235,33 @@
 		for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
 			;
 
-		if (itbl_entry[i].name) {
-			BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n",
-				offset, itbl_entry[i].id, itbl_entry[i].name);
-
-			/* execute eventual command handler */
-			res = (*itbl_entry[i].handler)(bios, offset, iexec);
-			if (!res)
-				break;
-			/*
-			 * Add the offset of the current command including all data
-			 * of that command. The offset will then be pointing on the
-			 * next op code.
-			 */
-			offset += res;
-		} else {
+		if (!itbl_entry[i].name) {
 			NV_ERROR(bios->dev,
 				 "0x%04X: Init table command not found: "
 				 "0x%02X\n", offset, id);
 			return -ENOENT;
 		}
+
+		BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", offset,
+			itbl_entry[i].id, itbl_entry[i].name);
+
+		/* execute eventual command handler */
+		ret = (*itbl_entry[i].handler)(bios, offset, iexec);
+		if (ret < 0) {
+			NV_ERROR(bios->dev, "0x%04X: Failed parsing init "
+				 "table opcode: %s %d\n", offset,
+				 itbl_entry[i].name, ret);
+		}
+
+		if (ret <= 0)
+			break;
+
+		/*
+		 * Add the offset of the current command including all data
+		 * of that command. The offset will then be pointing on the
+		 * next op code.
+		 */
+		offset += ret;
 	}
 
 	if (offset >= bios->length)
@@ -4285,31 +4511,32 @@
 			break;
 		}
 
-#if 0 /* for easy debugging */
-	ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
-	ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
-	ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
-	ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
-
-	ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
-	ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
-	ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
-	ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
-
-	ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
-	ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
-	ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
-	ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
-	ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
-	ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
-	ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
-	ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
-
-	ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p);
-	ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias);
-
-	ErrorF("pll.refclk: %d\n", pll_lim->refclk);
-#endif
+	NV_DEBUG(dev, "pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
+	NV_DEBUG(dev, "pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
+	NV_DEBUG(dev, "pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
+	NV_DEBUG(dev, "pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
+	NV_DEBUG(dev, "pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
+	NV_DEBUG(dev, "pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
+	NV_DEBUG(dev, "pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
+	NV_DEBUG(dev, "pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
+	if (pll_lim->vco2.maxfreq) {
+		NV_DEBUG(dev, "pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
+		NV_DEBUG(dev, "pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
+		NV_DEBUG(dev, "pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
+		NV_DEBUG(dev, "pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
+		NV_DEBUG(dev, "pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
+		NV_DEBUG(dev, "pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
+		NV_DEBUG(dev, "pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
+		NV_DEBUG(dev, "pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
+	}
+	if (!pll_lim->max_p) {
+		NV_DEBUG(dev, "pll.max_log2p: %d\n", pll_lim->max_log2p);
+		NV_DEBUG(dev, "pll.log2p_bias: %d\n", pll_lim->log2p_bias);
+	} else {
+		NV_DEBUG(dev, "pll.min_p: %d\n", pll_lim->min_p);
+		NV_DEBUG(dev, "pll.max_p: %d\n", pll_lim->max_p);
+	}
+	NV_DEBUG(dev, "pll.refclk: %d\n", pll_lim->refclk);
 
 	return 0;
 }
@@ -4953,79 +5180,6 @@
 	return 0;
 }
 
-static int
-read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
-{
-	uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
-	int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
-	int recordoffset = 0, rdofs = 1, wrofs = 0;
-	uint8_t port_type = 0;
-
-	if (!i2ctable)
-		return -EINVAL;
-
-	if (dcb_version >= 0x30) {
-		if (i2ctable[0] != dcb_version) /* necessary? */
-			NV_WARN(dev,
-				"DCB I2C table version mismatch (%02X vs %02X)\n",
-				i2ctable[0], dcb_version);
-		dcb_i2c_ver = i2ctable[0];
-		headerlen = i2ctable[1];
-		if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
-			i2c_entries = i2ctable[2];
-		else
-			NV_WARN(dev,
-				"DCB I2C table has more entries than indexable "
-				"(%d entries, max %d)\n", i2ctable[2],
-				DCB_MAX_NUM_I2C_ENTRIES);
-		entry_len = i2ctable[3];
-		/* [4] is i2c_default_indices, read in parse_dcb_table() */
-	}
-	/*
-	 * It's your own fault if you call this function on a DCB 1.1 BIOS --
-	 * the test below is for DCB 1.2
-	 */
-	if (dcb_version < 0x14) {
-		recordoffset = 2;
-		rdofs = 0;
-		wrofs = 1;
-	}
-
-	if (index == 0xf)
-		return 0;
-	if (index >= i2c_entries) {
-		NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
-			 index, i2ctable[2]);
-		return -ENOENT;
-	}
-	if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
-		NV_ERROR(dev, "DCB I2C entry invalid\n");
-		return -EINVAL;
-	}
-
-	if (dcb_i2c_ver >= 0x30) {
-		port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
-
-		/*
-		 * Fixup for chips using same address offset for read and
-		 * write.
-		 */
-		if (port_type == 4)	/* seen on C51 */
-			rdofs = wrofs = 1;
-		if (port_type >= 5)	/* G80+ */
-			rdofs = wrofs = 0;
-	}
-
-	if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6)
-		NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
-
-	i2c->port_type = port_type;
-	i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
-	i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
-
-	return 0;
-}
-
 static struct dcb_gpio_entry *
 new_gpio_entry(struct nvbios *bios)
 {
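
Editorial sketch, not part of the patch: the I2C init-opcode handlers reworked above shift the table's device address right by one (presumably because the VBIOS stores it in 8-bit, R/W-shifted form while the Linux I2C core wants 7 bits) and replace raw i2c_transfer() pairs with SMBus byte-data transfers that apply a mask/data update. The arithmetic alone, with made-up bytes and no bus traffic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t vbios_addr = 0xA0;              /* address byte as stored in the init table */
    uint8_t addr7 = vbios_addr >> 1;        /* 7-bit address handed to the I2C core (0x50) */

    uint8_t current = 0x3C;                 /* value read back from the device register */
    uint8_t mask = 0xF0, data = 0x05;       /* mask/data pair from the init table */
    uint8_t next = (current & mask) | data; /* keep the masked bits, OR in new data: 0x35 */

    printf("addr 0x%02X -> 0x%02X, reg 0x%02X -> 0x%02X\n",
           vbios_addr, addr7, current, next);
    return 0;
}
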
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index c0d7b0a..adf4ec2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -35,6 +35,7 @@
 #define DCB_LOC_ON_CHIP 0
 
 struct dcb_i2c_entry {
+	uint32_t entry;
 	uint8_t port_type;
 	uint8_t read, write;
 	struct nouveau_i2c_chan *chan;
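
Editorial sketch, not from the patch itself: read_dcb_i2c_entry(), moved and extended in nouveau_bios.c above, locates an entry purely by arithmetic on the table header — header length, a record offset only used by very old DCBs, the per-entry length, and the read/write byte offsets. A compilable illustration with invented table parameters (not real VBIOS data):

#include <stdio.h>

int main(void)
{
    int headerlen = 5, entry_len = 4;   /* DCB 3.0-style header, 4-byte entries */
    int recordoffset = 0;               /* only non-zero on pre-1.2 DCBs */
    int rdofs = 1, wrofs = 0;           /* defaults; port types 4 and 5+ override these */
    int index = 2;                      /* entry requested by the caller */

    int rd = headerlen + recordoffset + rdofs + entry_len * index;
    int wr = headerlen + recordoffset + wrofs + entry_len * index;

    printf("entry %d: read port byte at +%d, write port byte at +%d\n",
           index, rd, wr);
    return 0;
}

On DCB 4.0 tables the same index is also used to pull the full 32-bit entry word into the new `entry` field declared here.
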
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 957d176..6f3c195 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -160,11 +160,11 @@
 	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
 			  ttm_bo_type_device, &nvbo->placement, align, 0,
 			  false, NULL, size, nouveau_bo_del_ttm);
-	nvbo->channel = NULL;
 	if (ret) {
 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
 		return ret;
 	}
+	nvbo->channel = NULL;
 
 	spin_lock(&dev_priv->ttm.bo_list_lock);
 	list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
@@ -225,7 +225,7 @@
 
 	nouveau_bo_placement_set(nvbo, memtype, 0);
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -261,7 +261,7 @@
 
 	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -391,25 +391,16 @@
 		break;
 	case TTM_PL_VRAM:
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
-			     TTM_MEMTYPE_FLAG_MAPPABLE |
-			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-
-		man->io_addr = NULL;
-		man->io_offset = drm_get_resource_start(dev, 1);
-		man->io_size = drm_get_resource_len(dev, 1);
-		if (man->io_size > dev_priv->vram_size)
-			man->io_size = dev_priv->vram_size;
-
 		man->gpu_offset = dev_priv->vm_vram_base;
 		break;
 	case TTM_PL_TT:
 		switch (dev_priv->gart_info.type) {
 		case NOUVEAU_GART_AGP:
-			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
-				     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 			man->available_caching = TTM_PL_FLAG_UNCACHED;
 			man->default_caching = TTM_PL_FLAG_UNCACHED;
 			break;
@@ -424,10 +415,6 @@
 				 dev_priv->gart_info.type);
 			return -EINVAL;
 		}
-
-		man->io_offset  = dev_priv->gart_info.aper_base;
-		man->io_size    = dev_priv->gart_info.aper_size;
-		man->io_addr   = NULL;
 		man->gpu_offset = dev_priv->vm_gart_base;
 		break;
 	default:
@@ -462,7 +449,8 @@
 
 static int
 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
-			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
+			      struct nouveau_bo *nvbo, bool evict,
+			      bool no_wait_reserve, bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_fence *fence = NULL;
@@ -473,7 +461,7 @@
 		return ret;
 
 	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
-					evict, no_wait, new_mem);
+					evict, no_wait_reserve, no_wait_gpu, new_mem);
 	if (nvbo->channel && nvbo->channel != chan)
 		ret = nouveau_fence_wait(fence, NULL, false, false);
 	nouveau_fence_unref((void *)&fence);
@@ -497,7 +485,8 @@
 
 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-		     int no_wait, struct ttm_mem_reg *new_mem)
+		     bool no_wait_reserve, bool no_wait_gpu,
+		     struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@@ -575,12 +564,13 @@
 		dst_offset += (PAGE_SIZE * line_count);
 	}
 
-	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 }
 
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait, struct ttm_mem_reg *new_mem)
+		      bool no_wait_reserve, bool no_wait_gpu,
+		      struct ttm_mem_reg *new_mem)
 {
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
@@ -593,7 +583,7 @@
 
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		return ret;
 
@@ -601,11 +591,11 @@
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;
 
-	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 out:
 	if (tmp_mem.mm_node) {
 		spin_lock(&bo->bdev->glob->lru_lock);
@@ -618,7 +608,8 @@
 
 static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait, struct ttm_mem_reg *new_mem)
+		      bool no_wait_reserve, bool no_wait_gpu,
+		      struct ttm_mem_reg *new_mem)
 {
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
@@ -631,15 +622,15 @@
 
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	if (ret)
 		goto out;
 
@@ -706,7 +697,8 @@
 
 static int
 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-		bool no_wait, struct ttm_mem_reg *new_mem)
+		bool no_wait_reserve, bool no_wait_gpu,
+		struct ttm_mem_reg *new_mem)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -721,7 +713,7 @@
 	/* Software copy if the card isn't up and running yet. */
 	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
 	    !dev_priv->channel) {
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 		goto out;
 	}
 
@@ -735,17 +727,17 @@
 
 	/* Hardware assisted copy. */
 	if (new_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	else if (old_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	else
-		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 
 	if (!ret)
 		goto out;
 
 	/* Fallback to software copy. */
-	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 
 out:
 	if (ret)
@@ -762,6 +754,55 @@
 	return 0;
 }
 
+static int
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+	struct drm_device *dev = dev_priv->dev;
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		return 0;
+	case TTM_PL_TT:
+#if __OS_HAS_AGP
+		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+			mem->bus.base = dev_priv->gart_info.aper_base;
+			mem->bus.is_iomem = true;
+		}
+#endif
+		break;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.base = drm_get_resource_start(dev, 1);
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int
+nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	return 0;
+}
+
 struct ttm_bo_driver nouveau_bo_driver = {
 	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
 	.invalidate_caches = nouveau_bo_invalidate_caches,
@@ -774,5 +815,8 @@
 	.sync_obj_flush = nouveau_fence_flush,
 	.sync_obj_unref = nouveau_fence_unref,
 	.sync_obj_ref = nouveau_fence_ref,
+	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
+	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
+	.io_mem_free = &nouveau_ttm_io_mem_free,
 };
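
Editorial sketch, assumed values only: nouveau_ttm_io_mem_reserve() above replaces the old per-memory-type io_offset/io_size fields by reporting, per placement, a BAR base plus a page-aligned offset and size that TTM can then map for CPU access. The address arithmetic with a hypothetical BAR address:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    uint64_t bar1_base  = 0xd0000000ULL;  /* hypothetical start of the VRAM BAR */
    uint64_t node_start = 0x200;          /* buffer's first page within the aperture */
    uint64_t num_pages  = 16;

    uint64_t bus_offset = node_start << PAGE_SHIFT;   /* mm_node->start << PAGE_SHIFT */
    uint64_t bus_size   = num_pages << PAGE_SHIFT;    /* num_pages << PAGE_SHIFT */

    printf("CPU window for this BO: %#llx..%#llx\n",
           (unsigned long long)(bar1_base + bus_offset),
           (unsigned long long)(bar1_base + bus_offset + bus_size));
    return 0;
}
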
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 14afe1e..7e663a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -843,6 +843,7 @@
 
 	switch (dcb->type) {
 	case DCB_CONNECTOR_VGA:
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 		if (dev_priv->card_type >= NV_50) {
 			drm_connector_attach_property(connector,
 					dev->mode_config.scaling_mode_property,
@@ -854,6 +855,17 @@
 	case DCB_CONNECTOR_TV_3:
 		nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
 		break;
+	case DCB_CONNECTOR_DP:
+	case DCB_CONNECTOR_eDP:
+	case DCB_CONNECTOR_HDMI_0:
+	case DCB_CONNECTOR_HDMI_1:
+	case DCB_CONNECTOR_DVI_I:
+	case DCB_CONNECTOR_DVI_D:
+		if (dev_priv->card_type >= NV_50)
+			connector->polled = DRM_CONNECTOR_POLL_HPD;
+		else
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+		/* fall-through */
 	default:
 		nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index a251886..7933de4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -33,6 +33,8 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 
+#include <ttm/ttm_page_alloc.h>
+
 static int
 nouveau_debugfs_channel_info(struct seq_file *m, void *data)
 {
@@ -159,6 +161,7 @@
 	{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
 	{ "memory", nouveau_debugfs_memory_info, 0, NULL },
 	{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+	{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
 };
 #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index cf1c5c0..74e6b4e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -34,10 +34,6 @@
 nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
 {
 	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
-	struct drm_device *dev = drm_fb->dev;
-
-	if (drm_fb->fbdev)
-		nouveau_fbcon_remove(dev, drm_fb);
 
 	if (fb->nvbo)
 		drm_gem_object_unreference_unlocked(fb->nvbo->gem);
@@ -61,27 +57,20 @@
 	.create_handle = nouveau_user_framebuffer_create_handle,
 };
 
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
-			   struct drm_mode_fb_cmd *mode_cmd)
+int
+nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+			 struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
 {
-	struct nouveau_framebuffer *fb;
 	int ret;
 
-	fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
-	if (!fb)
-		return NULL;
-
-	ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+	ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
 	if (ret) {
-		kfree(fb);
-		return NULL;
+		return ret;
 	}
 
-	drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
-
-	fb->nvbo = nvbo;
-	return &fb->base;
+	drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
+	nouveau_fb->nvbo = nvbo;
+	return 0;
 }
 
 static struct drm_framebuffer *
@@ -89,24 +78,29 @@
 				struct drm_file *file_priv,
 				struct drm_mode_fb_cmd *mode_cmd)
 {
-	struct drm_framebuffer *fb;
+	struct nouveau_framebuffer *nouveau_fb;
 	struct drm_gem_object *gem;
+	int ret;
 
 	gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
 	if (!gem)
 		return NULL;
 
-	fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
-	if (!fb) {
+	nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
+	if (!nouveau_fb)
+		return NULL;
+
+	ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
+	if (ret) {
 		drm_gem_object_unreference(gem);
 		return NULL;
 	}
 
-	return fb;
+	return &nouveau_fb->base;
 }
 
 const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
 	.fb_create = nouveau_user_framebuffer_create,
-	.fb_changed = nouveau_fbcon_probe,
+	.output_poll_changed = nouveau_fbcon_output_poll_changed,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 1de974a..c6079e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -153,7 +153,6 @@
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_channel *chan;
 	struct drm_crtc *crtc;
-	uint32_t fbdev_flags;
 	int ret, i;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -163,8 +162,7 @@
 		return 0;
 
 	NV_INFO(dev, "Disabling fbcon acceleration...\n");
-	fbdev_flags = dev_priv->fbdev_info->flags;
-	dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+	nouveau_fbcon_save_disable_accel(dev);
 
 	NV_INFO(dev, "Unpinning framebuffer(s)...\n");
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -230,9 +228,9 @@
 	}
 
 	acquire_console_sem();
-	fb_set_suspend(dev_priv->fbdev_info, 1);
+	nouveau_fbcon_set_suspend(dev, 1);
 	release_console_sem();
-	dev_priv->fbdev_info->flags = fbdev_flags;
+	nouveau_fbcon_restore_accel(dev);
 	return 0;
 
 out_abort:
@@ -250,14 +248,12 @@
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_engine *engine = &dev_priv->engine;
 	struct drm_crtc *crtc;
-	uint32_t fbdev_flags;
 	int ret, i;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
-	fbdev_flags = dev_priv->fbdev_info->flags;
-	dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+	nouveau_fbcon_save_disable_accel(dev);
 
 	NV_INFO(dev, "We're back, enabling device...\n");
 	pci_set_power_state(pdev, PCI_D0);
@@ -332,13 +328,14 @@
 	}
 
 	acquire_console_sem();
-	fb_set_suspend(dev_priv->fbdev_info, 0);
+	nouveau_fbcon_set_suspend(dev, 0);
 	release_console_sem();
 
-	nouveau_fbcon_zfill(dev);
+	nouveau_fbcon_zfill_all(dev);
 
 	drm_helper_resume_force_mode(dev);
-	dev_priv->fbdev_info->flags = fbdev_flags;
+
+	nouveau_fbcon_restore_accel(dev);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index ace630a..5b13443 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -535,6 +535,7 @@
 
 	struct fb_info *fbdev_info;
 
+	int fifo_alloc_count;
 	struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
 
 	struct nouveau_engine engine;
@@ -621,6 +622,9 @@
 	struct {
 		struct dentry *channel_root;
 	} debugfs;
+
+	struct nouveau_fbdev *nfbdev;
+	struct apertures_struct *apertures;
 };
 
 static inline struct drm_nouveau_private *
@@ -1166,6 +1170,12 @@
 int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
 int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
 
+/* nv50_calc. */
+int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
+		  int *N1, int *M1, int *N2, int *M2, int *P);
+int nv50_calc_pll2(struct drm_device *, struct pll_lims *,
+		   int clk, int *N, int *fN, int *M, int *P);
+
 #ifndef ioread32_native
 #ifdef __BIG_ENDIAN
 #define ioread16_native ioread16be
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 9f28b94..e1df820 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -48,6 +48,8 @@
 	union {
 		struct {
 			int mc_unknown;
+			uint32_t unk0;
+			uint32_t unk1;
 			int dpcd_version;
 			int link_nr;
 			int link_bw;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index 4a3f31a..d432134 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -40,8 +40,6 @@
 
 extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
 
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
-			   struct drm_mode_fb_cmd *);
-
+int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+			     struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
 #endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8e7dc1d..fd4a2df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -52,8 +52,8 @@
 static int
 nouveau_fbcon_sync(struct fb_info *info)
 {
-	struct nouveau_fbcon_par *par = info->par;
-	struct drm_device *dev = par->dev;
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
 	int ret, i;
@@ -97,7 +97,6 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_setcolreg = drm_fb_helper_setcolreg,
 	.fb_fillrect = cfb_fillrect,
 	.fb_copyarea = cfb_copyarea,
 	.fb_imageblit = cfb_imageblit,
@@ -111,7 +110,6 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_setcolreg = drm_fb_helper_setcolreg,
 	.fb_fillrect = nv04_fbcon_fillrect,
 	.fb_copyarea = nv04_fbcon_copyarea,
 	.fb_imageblit = nv04_fbcon_imageblit,
@@ -125,7 +123,6 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_setcolreg = drm_fb_helper_setcolreg,
 	.fb_fillrect = nv50_fbcon_fillrect,
 	.fb_copyarea = nv50_fbcon_copyarea,
 	.fb_imageblit = nv50_fbcon_imageblit,
@@ -155,54 +152,10 @@
 	*blue = nv_crtc->lut.b[regno];
 }
 
-static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
-	.gamma_set = nouveau_fbcon_gamma_set,
-	.gamma_get = nouveau_fbcon_gamma_get
-};
-
-#if defined(__i386__) || defined(__x86_64__)
-static bool
-nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
+static void
+nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 {
-	struct pci_dev *pdev = dev->pdev;
-	int ramin;
-
-	if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB &&
-	    screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
-		return false;
-
-	if (screen_info.lfb_base < pci_resource_start(pdev, 1))
-		goto not_fb;
-
-	if (screen_info.lfb_base + screen_info.lfb_size >=
-	    pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1))
-		goto not_fb;
-
-	return true;
-not_fb:
-	ramin = 2;
-	if (pci_resource_len(pdev, ramin) == 0) {
-		ramin = 3;
-		if (pci_resource_len(pdev, ramin) == 0)
-			return false;
-	}
-
-	if (screen_info.lfb_base < pci_resource_start(pdev, ramin))
-		return false;
-
-	if (screen_info.lfb_base + screen_info.lfb_size >=
-	    pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin))
-		return false;
-
-	return true;
-}
-#endif
-
-void
-nouveau_fbcon_zfill(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct fb_info *info = dev_priv->fbdev_info;
+	struct fb_info *info = nfbdev->helper.fbdev;
 	struct fb_fillrect rect;
 
 	/* Clear the entire fbcon.  The drm will program every connector
@@ -218,28 +171,27 @@
 }
 
 static int
-nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
-		     uint32_t fb_height, uint32_t surface_width,
-		     uint32_t surface_height, uint32_t surface_depth,
-		     uint32_t surface_bpp, struct drm_framebuffer **pfb)
+nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
+		     struct drm_fb_helper_surface_size *sizes)
 {
+	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct fb_info *info;
-	struct nouveau_fbcon_par *par;
 	struct drm_framebuffer *fb;
 	struct nouveau_framebuffer *nouveau_fb;
 	struct nouveau_bo *nvbo;
 	struct drm_mode_fb_cmd mode_cmd;
-	struct device *device = &dev->pdev->dev;
+	struct pci_dev *pdev = dev->pdev;
+	struct device *device = &pdev->dev;
 	int size, ret;
 
-	mode_cmd.width = surface_width;
-	mode_cmd.height = surface_height;
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
 
-	mode_cmd.bpp = surface_bpp;
+	mode_cmd.bpp = sizes->surface_bpp;
 	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
 	mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
-	mode_cmd.depth = surface_depth;
+	mode_cmd.depth = sizes->surface_depth;
 
 	size = mode_cmd.pitch * mode_cmd.height;
 	size = roundup(size, PAGE_SIZE);
@@ -268,31 +220,28 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
-	if (!fb) {
-		ret = -ENOMEM;
-		NV_ERROR(dev, "failed to allocate fb.\n");
-		goto out_unref;
-	}
-
-	list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
-	nouveau_fb = nouveau_framebuffer(fb);
-	*pfb = fb;
-
-	info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
+	info = framebuffer_alloc(0, device);
 	if (!info) {
 		ret = -ENOMEM;
 		goto out_unref;
 	}
 
-	par = info->par;
-	par->helper.funcs = &nouveau_fbcon_helper_funcs;
-	par->helper.dev = dev;
-	ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
-	if (ret)
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
 		goto out_unref;
-	dev_priv->fbdev_info = info;
+	}
+
+	info->par = nfbdev;
+
+	nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);
+
+	nouveau_fb = &nfbdev->nouveau_fb;
+	fb = &nouveau_fb->base;
+
+	/* setup helper */
+	nfbdev->helper.fb = fb;
+	nfbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "nouveaufb");
 	if (nouveau_nofbaccel)
@@ -310,31 +259,17 @@
 	info->screen_size = size;
 
 	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
-	drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	/* FIXME: we really shouldn't expose mmio space at all */
-	info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
-	info->fix.mmio_len = pci_resource_len(dev->pdev, 1);
+	info->fix.mmio_start = pci_resource_start(pdev, 1);
+	info->fix.mmio_len = pci_resource_len(pdev, 1);
 
 	/* Set aperture base/size for vesafb takeover */
-#if defined(__i386__) || defined(__x86_64__)
-	if (nouveau_fbcon_has_vesafb_or_efifb(dev)) {
-		/* Some NVIDIA VBIOS' are stupid and decide to put the
-		 * framebuffer in the middle of the PRAMIN BAR for
-		 * whatever reason.  We need to know the exact lfb_base
-		 * to get vesafb kicked off, and the only reliable way
-		 * we have left is to find out lfb_base the same way
-		 * vesafb did.
-		 */
-		info->aperture_base = screen_info.lfb_base;
-		info->aperture_size = screen_info.lfb_size;
-		if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
-			info->aperture_size *= 65536;
-	} else
-#endif
-	{
-		info->aperture_base = info->fix.mmio_start;
-		info->aperture_size = info->fix.mmio_len;
+	info->apertures = dev_priv->apertures;
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto out_unref;
 	}
 
 	info->pixmap.size = 64*1024;
@@ -343,11 +278,6 @@
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
 	info->pixmap.scan_align = 1;
 
-	fb->fbdev = info;
-
-	par->nouveau_fb = nouveau_fb;
-	par->dev = dev;
-
 	if (dev_priv->channel && !nouveau_nofbaccel) {
 		switch (dev_priv->card_type) {
 		case NV_50:
@@ -361,7 +291,7 @@
 		};
 	}
 
-	nouveau_fbcon_zfill(dev);
+	nouveau_fbcon_zfill(dev, nfbdev);
 
 	/* To allow resizing without swapping buffers */
 	NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
@@ -379,44 +309,123 @@
 	return ret;
 }
 
-int
-nouveau_fbcon_probe(struct drm_device *dev)
+static int
+nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
+				    struct drm_fb_helper_surface_size *sizes)
 {
-	NV_DEBUG_KMS(dev, "\n");
+	struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper;
+	int new_fb = 0;
+	int ret;
 
-	return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
+	if (!helper->fb) {
+		ret = nouveau_fbcon_create(nfbdev, sizes);
+		if (ret)
+			return ret;
+		new_fb = 1;
+	}
+	return new_fb;
+}
+
+void
+nouveau_fbcon_output_poll_changed(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper);
 }
 
 int
-nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 {
-	struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
+	struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
 	struct fb_info *info;
 
-	if (!fb)
-		return -EINVAL;
-
-	info = fb->fbdev;
-	if (info) {
-		struct nouveau_fbcon_par *par = info->par;
-
+	if (nfbdev->helper.fbdev) {
+		info = nfbdev->helper.fbdev;
 		unregister_framebuffer(info);
-		nouveau_bo_unmap(nouveau_fb->nvbo);
-		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
-		nouveau_fb->nvbo = NULL;
-		if (par)
-			drm_fb_helper_free(&par->helper);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
 		framebuffer_release(info);
 	}
 
+	if (nouveau_fb->nvbo) {
+		nouveau_bo_unmap(nouveau_fb->nvbo);
+		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
+		nouveau_fb->nvbo = NULL;
+	}
+	drm_fb_helper_fini(&nfbdev->helper);
+	drm_framebuffer_cleanup(&nouveau_fb->base);
 	return 0;
 }
 
 void nouveau_fbcon_gpu_lockup(struct fb_info *info)
 {
-	struct nouveau_fbcon_par *par = info->par;
-	struct drm_device *dev = par->dev;
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
 
 	NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
 	info->flags |= FBINFO_HWACCEL_DISABLED;
 }
+
+static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+	.gamma_set = nouveau_fbcon_gamma_set,
+	.gamma_get = nouveau_fbcon_gamma_get,
+	.fb_probe = nouveau_fbcon_find_or_create_single,
+};
+
+
+int nouveau_fbcon_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fbdev *nfbdev;
+
+	nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
+	if (!nfbdev)
+		return -ENOMEM;
+
+	nfbdev->dev = dev;
+	dev_priv->nfbdev = nfbdev;
+	nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
+
+	drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
+	drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
+	drm_fb_helper_initial_config(&nfbdev->helper, 32);
+	return 0;
+}
+
+void nouveau_fbcon_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->nfbdev)
+		return;
+
+	nouveau_fbcon_destroy(dev, dev_priv->nfbdev);
+	kfree(dev_priv->nfbdev);
+	dev_priv->nfbdev = NULL;
+}
+
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags;
+	dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+}
+
+void nouveau_fbcon_restore_accel(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags;
+}
+
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
+}
+
+void nouveau_fbcon_zfill_all(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	nouveau_fbcon_zfill(dev, dev_priv->nfbdev);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index f9c34e1..e7e1268 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -29,16 +29,16 @@
 
 #include "drm_fb_helper.h"
 
-struct nouveau_fbcon_par {
+#include "nouveau_fb.h"
+struct nouveau_fbdev {
 	struct drm_fb_helper helper;
+	struct nouveau_framebuffer nouveau_fb;
+	struct list_head fbdev_list;
 	struct drm_device *dev;
-	struct nouveau_framebuffer *nouveau_fb;
+	unsigned int saved_flags;
 };
 
-int nouveau_fbcon_probe(struct drm_device *dev);
-int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
 void nouveau_fbcon_restore(void);
-void nouveau_fbcon_zfill(struct drm_device *dev);
 
 void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
 void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@@ -50,5 +50,14 @@
 int nv50_fbcon_accel_init(struct fb_info *info);
 
 void nouveau_fbcon_gpu_lockup(struct fb_info *info);
+
+int nouveau_fbcon_init(struct drm_device *dev);
+void nouveau_fbcon_fini(struct drm_device *dev);
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
+void nouveau_fbcon_zfill_all(struct drm_device *dev);
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
+void nouveau_fbcon_restore_accel(struct drm_device *dev);
+
+void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
 #endif /* __NV50_FBCON_H__ */
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 1bc0b38..69c76cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -57,6 +57,9 @@
 	}
 
 	ttm_bo_unref(&bo);
+
+	drm_gem_object_release(gem);
+	kfree(gem);
 }
 
 int
@@ -382,7 +385,7 @@
 
 		nvbo->channel = chan;
 		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-				      false, false);
+				      false, false, false);
 		nvbo->channel = NULL;
 		if (unlikely(ret)) {
 			NV_ERROR(dev, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index 32f0e49..f731c5f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -68,13 +68,12 @@
 			return ret;
 		}
 
-		pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
+		pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL);
 		if (!pgraph->ctxprog) {
 			NV_ERROR(dev, "OOM copying ctxprog\n");
 			release_firmware(fw);
 			return -ENOMEM;
 		}
-		memcpy(pgraph->ctxprog, fw->data, fw->size);
 
 		cp = pgraph->ctxprog;
 		if (le32_to_cpu(cp->signature) != 0x5043564e ||
@@ -97,14 +96,13 @@
 			return ret;
 		}
 
-		pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
+		pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL);
 		if (!pgraph->ctxvals) {
 			NV_ERROR(dev, "OOM copying ctxvals\n");
 			release_firmware(fw);
 			nouveau_grctx_fini(dev);
 			return -ENOMEM;
 		}
-		memcpy(pgraph->ctxvals, fw->data, fw->size);
 
 		cv = (void *)pgraph->ctxvals;
 		if (le32_to_cpu(cv->signature) != 0x5643564e ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 88583e7..316a3c7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -254,16 +254,27 @@
 nouveau_i2c_find(struct drm_device *dev, int index)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index];
 
 	if (index >= DCB_MAX_NUM_I2C_ENTRIES)
 		return NULL;
 
-	if (!bios->dcb.i2c[index].chan) {
-		if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index))
-			return NULL;
+	if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) {
+		uint32_t reg = 0xe500, val;
+
+		if (i2c->port_type == 6) {
+			reg += i2c->read * 0x50;
+			val  = 0x2002;
+		} else {
+			reg += ((i2c->entry & 0x1e00) >> 9) * 0x50;
+			val  = 0xe001;
+		}
+
+		nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val);
 	}
 
-	return bios->dcb.i2c[index].chan;
+	if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
+		return NULL;
+	return i2c->chan;
 }
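
Editorial sketch, invented DCB entry and no register access: for NV50+ boards the lookup above now programs a pad control register before returning a channel — bit 8 of the 32-bit DCB entry flags the bus, the register is chosen at 0xe500 plus a 0x50 stride, and its low bits get a masked rewrite. The selection logic in isolation:

#include <stdio.h>

int main(void)
{
    unsigned int entry = 0x00000100 | (3 << 9); /* hypothetical: flagged bus, pad index 3 */
    unsigned int port_type = 5;                 /* 6 takes the aux-style branch below */
    unsigned int read_port = 2;

    unsigned int reg = 0xe500, val;
    if (port_type == 6) {
        reg += read_port * 0x50;
        val  = 0x2002;
    } else {
        reg += ((entry & 0x1e00) >> 9) * 0x50;
        val  = 0xe001;
    }

    unsigned int old = 0x0000f003;              /* pretend current register contents */
    unsigned int new = (old & ~0xf003) | val;   /* same masked update as the driver */

    printf("reg 0x%04x: 0x%08x -> 0x%08x\n", reg, old, new);
    return 0;
}
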
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 13e73ce..53360f1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -1204,7 +1204,7 @@
 {
 	struct drm_device *dev = (struct drm_device *)arg;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t status, fbdev_flags = 0;
+	uint32_t status;
 	unsigned long flags;
 
 	status = nv_rd32(dev, NV03_PMC_INTR_0);
@@ -1213,11 +1213,6 @@
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
-	if (dev_priv->fbdev_info) {
-		fbdev_flags = dev_priv->fbdev_info->flags;
-		dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
-	}
-
 	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
 		nouveau_fifo_irq_handler(dev);
 		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
@@ -1247,9 +1242,6 @@
 	if (status)
 		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
 
-	if (dev_priv->fbdev_info)
-		dev_priv->fbdev_info->flags = fbdev_flags;
-
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
 	return IRQ_HANDLED;
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index aa9b310..6ca80a3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -826,6 +826,7 @@
 #define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2                          0x02000000
 #define NV50_SOR_DP_UNK118(i,l)          (0x0061c118 + (i) * 0x800 + (l) * 0x80)
 #define NV50_SOR_DP_UNK120(i,l)          (0x0061c120 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_UNK128(i,l)          (0x0061c128 + (i) * 0x800 + (l) * 0x80)
 #define NV50_SOR_DP_UNK130(i,l)          (0x0061c130 + (i) * 0x800 + (l) * 0x80)
 
 #define NV50_PDISPLAY_USER(i)                        ((i) * 0x1000 + 0x00640000)
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e171064..e632339 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -34,6 +34,7 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
+#include "nouveau_fbcon.h"
 #include "nv50_display.h"
 
 static void nouveau_stub_takedown(struct drm_device *dev) {}
@@ -515,8 +516,10 @@
 
 	dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_helper_initial_config(dev);
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		nouveau_fbcon_init(dev);
+		drm_kms_helper_poll_init(dev);
+	}
 
 	return 0;
 
@@ -563,6 +566,7 @@
 	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
 
 	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
+
 		nouveau_backlight_exit(dev);
 
 		if (dev_priv->channel) {
@@ -637,6 +641,48 @@
 #endif
 }
 
+static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+	struct apertures_struct *aper = alloc_apertures(3);
+	if (!aper)
+		return NULL;
+
+	aper->ranges[0].base = pci_resource_start(pdev, 1);
+	aper->ranges[0].size = pci_resource_len(pdev, 1);
+	aper->count = 1;
+
+	if (pci_resource_len(pdev, 2)) {
+		aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
+		aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
+		aper->count++;
+	}
+
+	if (pci_resource_len(pdev, 3)) {
+		aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
+		aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
+		aper->count++;
+	}
+
+	return aper;
+}
+
+static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	bool primary = false;
+	dev_priv->apertures = nouveau_get_apertures(dev);
+	if (!dev_priv->apertures)
+		return -ENOMEM;
+
+#ifdef CONFIG_X86
+	primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+
+	remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
+	return 0;
+}
+
 int nouveau_load(struct drm_device *dev, unsigned long flags)
 {
 	struct drm_nouveau_private *dev_priv;
@@ -724,6 +770,12 @@
 	NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
 		dev_priv->card_type, reg0);
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		int ret = nouveau_remove_conflicting_drivers(dev);
+		if (ret)
+			return ret;
+	}
+
 	/* map larger RAMIN aperture on NV40 cards */
 	dev_priv->ramin  = NULL;
 	if (dev_priv->card_type >= NV_40) {
@@ -794,6 +846,8 @@
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		drm_kms_helper_poll_fini(dev);
+		nouveau_fbcon_fini(dev);
 		if (dev_priv->card_type >= NV_50)
 			nv50_display_destroy(dev);
 		else
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 813b25ce..1eeac4f 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -30,8 +30,8 @@
 void
 nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
-	struct nouveau_fbcon_par *par = info->par;
-	struct drm_device *dev = par->dev;
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
 
@@ -57,8 +57,8 @@
 void
 nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
-	struct nouveau_fbcon_par *par = info->par;
-	struct drm_device *dev = par->dev;
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
 
@@ -91,8 +91,8 @@
 void
 nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
-	struct nouveau_fbcon_par *par = info->par;
-	struct drm_device *dev = par->dev;
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
 	uint32_t fg;
@@ -179,8 +179,8 @@
 int
 nv04_fbcon_accel_init(struct fb_info *info)
 {
-	struct nouveau_fbcon_par *par = info->par;
-	struct drm_device *dev = par->dev;
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
 	const int sub = NvSubCtxSurf2D;
@@ -236,7 +236,7 @@
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
+	ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
 				   0x009f : 0x005f, NvImageBlit);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index e260986..618355e 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -532,9 +532,82 @@
 	return 0;
 }
 
-static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+/*
+ * Software methods, why they are needed, and how they all work:
+ *
+ * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
+ * 2d engine settings are kept inside the grobjs themselves. The grobjs are
+ * 3 words long on both. grobj format on NV04 is:
+ *
+ * word 0:
+ *  - bits 0-7: class
+ *  - bit 12: color key active
+ *  - bit 13: clip rect active
+ *  - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ *            [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ *            from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ *            NV03_CONTEXT_SURFACE_DST].
+ *  - bits 15-17: 2d operation [aka patch config]
+ *  - bit 24: patch valid [enables rendering using this object]
+ *  - bit 25: surf3d valid [for tex_tri and multitex_tri only]
+ * word 1:
+ *  - bits 0-1: mono format
+ *  - bits 8-13: color format
+ *  - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ *  - bits 0-15: DMA_A instance
+ *  - bits 16-31: DMA_B instance
+ *
+ * On NV05 it's:
+ *
+ * word 0:
+ *  - bits 0-7: class
+ *  - bit 12: color key active
+ *  - bit 13: clip rect active
+ *  - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ *            [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ *            from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ *            NV03_CONTEXT_SURFACE_DST].
+ *  - bits 15-17: 2d operation [aka patch config]
+ *  - bits 20-22: dither mode
+ *  - bit 24: patch valid [enables rendering using this object]
+ *  - bit 25: surface_dst/surface_color/surf2d/surf3d valid
+ *  - bit 26: surface_src/surface_zeta valid
+ *  - bit 27: pattern valid
+ *  - bit 28: rop valid
+ *  - bit 29: beta1 valid
+ *  - bit 30: beta4 valid
+ * word 1:
+ *  - bits 0-1: mono format
+ *  - bits 8-13: color format
+ *  - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ *  - bits 0-15: DMA_A instance
+ *  - bits 16-31: DMA_B instance
+ *
+ * NV05 will set/unset the relevant valid bits when you poke the relevant
+ * object-binding methods with an object of the proper type, or with the NULL
+ * type. It'll only allow rendering using the grobj if all needed objects
+ * are bound. The needed set of objects depends on the selected operation: for
+ * example, the rop object is needed by ROP_AND, but not by SRCCOPY_AND.
+ *
+ * NV04 doesn't have these methods implemented at all, and doesn't have the
+ * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
+ * is set. So we have to emulate them in software, internally keeping the
+ * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04 and
+ * the last word isn't actually used for anything, we abuse that word for
+ * this purpose.
+ *
+ * Actually, NV05 can optionally check bit 24 too, but we disable this since
+ * there's no use for it.
+ *
+ * For unknown reasons, NV04 implements surf3d binding in hardware as an
+ * exception. Also for unknown reasons, NV04 doesn't implement the clipping
+ * methods on the surf3d object, so we have to emulate them too.
+ */
+
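+/*
+ * Purely illustrative sketch (not used anywhere in the driver): a helper
+ * that unpacks the grobj word 0 fields documented above.  The name and
+ * signature are hypothetical; it only restates the bit layout that the
+ * software methods below rely on.
+ */
+static inline void
+nv04_grobj_decode_ctx1(uint32_t ctx1, int *grclass, int *op, bool *patch_valid)
+{
+	*grclass     = ctx1 & 0xff;          /* bits 0-7: object class */
+	*op          = (ctx1 >> 15) & 7;     /* bits 15-17: 2d operation */
+	*patch_valid = !!(ctx1 & (1 << 24)); /* bit 24: patch valid */
+}
+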
+static void
+nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
 {
 	struct drm_device *dev = chan->dev;
 	uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
@@ -542,42 +615,509 @@
 	uint32_t tmp;
 
 	tmp  = nv_ri32(dev, instance);
-	tmp &= ~0x00038000;
-	tmp |= ((data & 7) << 15);
+	tmp &= ~mask;
+	tmp |= value;
 
 	nv_wi32(dev, instance, tmp);
 	nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
 	nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
+}
+
+static void
+nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+{
+	struct drm_device *dev = chan->dev;
+	uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+	uint32_t tmp, ctx1;
+	int class, op, valid = 1;
+
+	ctx1 = nv_ri32(dev, instance);
+	class = ctx1 & 0xff;
+	op = (ctx1 >> 15) & 7;
+	tmp  = nv_ri32(dev, instance + 0xc);
+	tmp &= ~mask;
+	tmp |= value;
+	nv_wi32(dev, instance + 0xc, tmp);
+
+	/* check for valid surf2d/surf_dst/surf_color */
+	if (!(tmp & 0x02000000))
+		valid = 0;
+	/* check for valid surf_src/surf_zeta */
+	if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
+		valid = 0;
+
+	switch (op) {
+	/* SRCCOPY_AND, SRCCOPY: no extra objects required */
+	case 0:
+	case 3:
+		break;
+	/* ROP_AND: requires pattern and rop */
+	case 1:
+		if (!(tmp & 0x18000000))
+			valid = 0;
+		break;
+	/* BLEND_AND: requires beta1 */
+	case 2:
+		if (!(tmp & 0x20000000))
+			valid = 0;
+		break;
+	/* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
+	case 4:
+	case 5:
+		if (!(tmp & 0x40000000))
+			valid = 0;
+		break;
+	}
+
+	nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
+}
+
+static int
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	if (data > 5)
+		return 1;
+	/* Old versions of the objects only accept the first three operations. */
+	if (data > 2 && grclass < 0x40)
+		return 1;
+	nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
+	/* changing operation changes set of objects needed for validation */
+	nv04_graph_set_ctx_val(chan, 0, 0);
 	return 0;
 }
 
+static int
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	uint32_t min = data & 0xffff, max;
+	uint32_t w = data >> 16;
+	if (min & 0x8000)
+		/* too large */
+		return 1;
+	if (w & 0x8000)
+		/* yes, it accepts negative values for some reason. */
+		w |= 0xffff0000;
+	max = min + w;
+	max &= 0x3ffff;
+	nv_wr32(chan->dev, 0x40053c, min);
+	nv_wr32(chan->dev, 0x400544, max);
+	return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	uint32_t min = data & 0xffff, max;
+	uint32_t w = data >> 16;
+	if (min & 0x8000)
+		/* too large */
+		return 1;
+	if (w & 0x8000)
+		/* yes, it accepts negative values for some reason. */
+		w |= 0xffff0000;
+	max = min + w;
+	max &= 0x3ffff;
+	nv_wr32(chan->dev, 0x400540, min);
+	nv_wr32(chan->dev, 0x400548, max);
+	return 0;
+}
+
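+/*
+ * Worked example for the packed clip parameters handled above (values are
+ * illustrative only): data = 0xfff80020 encodes min = 0x0020 and w = 0xfff8.
+ * Bit 15 of w is set, so it is sign-extended to -8, giving
+ * max = 0x20 - 8 = 0x18 after the 0x3ffff mask; 0x20 and 0x18 are the values
+ * written to the two clip registers.
+ */
+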
+static int
+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+	case 0x30:
+		nv04_graph_set_ctx1(chan, 0x00004000, 0);
+		nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+		return 0;
+	case 0x42:
+		nv04_graph_set_ctx1(chan, 0x00004000, 0);
+		nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+	case 0x30:
+		nv04_graph_set_ctx1(chan, 0x00004000, 0);
+		nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+		return 0;
+	case 0x42:
+		nv04_graph_set_ctx1(chan, 0x00004000, 0);
+		nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+		return 0;
+	case 0x52:
+		nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000);
+		nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+	case 0x30:
+		nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+		return 0;
+	case 0x18:
+		nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+	case 0x30:
+		nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+		return 0;
+	case 0x44:
+		nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+	case 0x30:
+		nv04_graph_set_ctx_val(chan, 0x10000000, 0);
+		return 0;
+	case 0x43:
+		nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+	case 0x30:
+		nv04_graph_set_ctx_val(chan, 0x20000000, 0);
+		return 0;
+	case 0x12:
+		nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+	case 0x30:
+		nv04_graph_set_ctx_val(chan, 0x40000000, 0);
+		return 0;
+	case 0x72:
+		nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+	case 0x30:
+		nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+		return 0;
+	case 0x58:
+		nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+	case 0x30:
+		nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+		return 0;
+	case 0x59:
+		nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+	case 0x30:
+		nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+		return 0;
+	case 0x5a:
+		nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+	case 0x30:
+		nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+		return 0;
+	case 0x5b:
+		nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+	case 0x30:
+		nv04_graph_set_ctx1(chan, 0x2000, 0);
+		return 0;
+	case 0x19:
+		nv04_graph_set_ctx1(chan, 0x2000, 0x2000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+	case 0x30:
+		nv04_graph_set_ctx1(chan, 0x1000, 0);
+		return 0;
+	/* Yes, for some reason even the old versions of objects
+	 * accept 0x57 and not 0x17. Consistency be damned.
+	 */
+	case 0x57:
+		nv04_graph_set_ctx1(chan, 0x1000, 0x1000);
+		return 0;
+	}
+	return 1;
+}
+
 static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
 	{ 0x0150, nv04_graph_mthd_set_ref },
 	{}
 };
 
-static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
+	{ 0x0184, nv04_graph_mthd_bind_nv01_patt },
+	{ 0x0188, nv04_graph_mthd_bind_rop },
+	{ 0x018c, nv04_graph_mthd_bind_beta1 },
+	{ 0x0190, nv04_graph_mthd_bind_surf_dst },
 	{ 0x02fc, nv04_graph_mthd_set_operation },
 	{},
 };
 
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
+	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
+	{ 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
+	{ 0x0184, nv04_graph_mthd_bind_chroma },
+	{ 0x0188, nv04_graph_mthd_bind_clip },
+	{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
+	{ 0x0190, nv04_graph_mthd_bind_rop },
+	{ 0x0194, nv04_graph_mthd_bind_beta1 },
+	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
+	{ 0x019c, nv04_graph_mthd_bind_surf_src },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
+	{ 0x0184, nv04_graph_mthd_bind_chroma },
+	{ 0x0188, nv04_graph_mthd_bind_clip },
+	{ 0x018c, nv04_graph_mthd_bind_nv04_patt },
+	{ 0x0190, nv04_graph_mthd_bind_rop },
+	{ 0x0194, nv04_graph_mthd_bind_beta1 },
+	{ 0x0198, nv04_graph_mthd_bind_beta4 },
+	{ 0x019c, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
+	{ 0x0188, nv04_graph_mthd_bind_chroma },
+	{ 0x018c, nv04_graph_mthd_bind_clip },
+	{ 0x0190, nv04_graph_mthd_bind_nv04_patt },
+	{ 0x0194, nv04_graph_mthd_bind_rop },
+	{ 0x0198, nv04_graph_mthd_bind_beta1 },
+	{ 0x019c, nv04_graph_mthd_bind_beta4 },
+	{ 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+	{ 0x03e4, nv04_graph_mthd_set_operation },
+	{},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
+	{ 0x0184, nv04_graph_mthd_bind_chroma },
+	{ 0x0188, nv04_graph_mthd_bind_clip },
+	{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
+	{ 0x0190, nv04_graph_mthd_bind_rop },
+	{ 0x0194, nv04_graph_mthd_bind_beta1 },
+	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
+	{ 0x0184, nv04_graph_mthd_bind_chroma },
+	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
+	{ 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
+	{ 0x0184, nv04_graph_mthd_bind_chroma },
+	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
+	{ 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
+	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
+	{ 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
+	{ 0x0304, nv04_graph_mthd_set_operation },
+	{},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
+	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
+	{ 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
+	{ 0x0304, nv04_graph_mthd_set_operation },
+	{},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
+	{ 0x0184, nv04_graph_mthd_bind_clip },
+	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
+	{ 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
+	{ 0x0184, nv04_graph_mthd_bind_clip },
+	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
+	{ 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
+	{ 0x0188, nv04_graph_mthd_bind_clip },
+	{ 0x018c, nv04_graph_mthd_bind_surf_color },
+	{ 0x0190, nv04_graph_mthd_bind_surf_zeta },
+	{},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
+	{ 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+	{ 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+	{},
+};
+
 struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
-	{ 0x0039, false, NULL },
-	{ 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
-	{ 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
-	{ 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
-	{ 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */
+	{ 0x0038, false, NULL }, /* dvd subpicture */
+	{ 0x0039, false, NULL }, /* m2mf */
+	{ 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
+	{ 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
+	{ 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
+	{ 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
+	{ 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
+	{ 0x0064, false, NULL }, /* nv05 iifc */
+	{ 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
+	{ 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
+	{ 0x0065, false, NULL }, /* nv05 ifc */
+	{ 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
+	{ 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
+	{ 0x0066, false, NULL }, /* nv05 sifc */
+	{ 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
+	{ 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
 	{ 0x0030, false, NULL }, /* null */
 	{ 0x0042, false, NULL }, /* surf2d */
 	{ 0x0043, false, NULL }, /* rop */
 	{ 0x0012, false, NULL }, /* beta1 */
 	{ 0x0072, false, NULL }, /* beta4 */
 	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0044, false, NULL }, /* pattern */
+	{ 0x0018, false, NULL }, /* nv01 pattern */
+	{ 0x0044, false, NULL }, /* nv04 pattern */
 	{ 0x0052, false, NULL }, /* swzsurf */
-	{ 0x0053, false, NULL }, /* surf3d */
+	{ 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
+	{ 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
 	{ 0x0054, false, NULL }, /* tex_tri */
 	{ 0x0055, false, NULL }, /* multitex_tri */
+	{ 0x0017, false, NULL }, /* nv01 chroma */
+	{ 0x0057, false, NULL }, /* nv04 chroma */
+	{ 0x0058, false, NULL }, /* surf_dst */
+	{ 0x0059, false, NULL }, /* surf_src */
+	{ 0x005a, false, NULL }, /* surf_color */
+	{ 0x005b, false, NULL }, /* surf_zeta */
+	{ 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
+	{ 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
+	{ 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
+	{ 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
+	{ 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
+	{ 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
 	{ 0x506e, true, nv04_graph_mthds_sw },
 	{}
 };
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index 11b11c3..9b5c974 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -115,11 +115,6 @@
 
 /* TODO:
  *  - get vs count from 0x1540
- *  - document unimplemented bits compared to nvidia
- *    - nsource handling
- *    - R0 & 0x0200 handling
- *    - single-vs handling
- *    - 400314 bit 0
  */
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c
new file mode 100644
index 0000000..2cdc2bf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_calc.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "drm_fixed.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
+int
+nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
+	      int *N1, int *M1, int *N2, int *M2, int *P)
+{
+	struct nouveau_pll_vals pll_vals;
+	int ret;
+
+	ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals);
+	if (ret <= 0)
+		return ret;
+
+	*N1 = pll_vals.N1;
+	*M1 = pll_vals.M1;
+	*N2 = pll_vals.N2;
+	*M2 = pll_vals.M2;
+	*P = pll_vals.log2P;
+	return ret;
+}
+
+int
+nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk,
+	       int *N, int *fN, int *M, int *P)
+{
+	fixed20_12 fb_div, a, b;
+
+	*P = pll->vco1.maxfreq / clk;
+	if (*P > pll->max_p)
+		*P = pll->max_p;
+	if (*P < pll->min_p)
+		*P = pll->min_p;
+
+	/* *M = ceil(refclk / pll->vco.max_inputfreq); */
+	a.full = dfixed_const(pll->refclk);
+	b.full = dfixed_const(pll->vco1.max_inputfreq);
+	a.full = dfixed_div(a, b);
+	a.full = dfixed_ceil(a);
+	*M = dfixed_trunc(a);
+
+	/* fb_div = (vco * *M) / refclk; */
+	fb_div.full = dfixed_const(clk * *P);
+	fb_div.full = dfixed_mul(fb_div, a);
+	a.full = dfixed_const(pll->refclk);
+	fb_div.full = dfixed_div(fb_div, a);
+
+	/* *N = floor(fb_div); */
+	a.full = dfixed_floor(fb_div);
+	*N = dfixed_trunc(fb_div);
+
+	/* *fN = (fmod(fb_div, 1.0) * 8192) - 4096; */
+	b.full = dfixed_const(8192);
+	a.full = dfixed_mul(a, b);
+	fb_div.full = dfixed_mul(fb_div, b);
+	fb_div.full = fb_div.full - a.full;
+	*fN = dfixed_trunc(fb_div) - 4096;
+	*fN &= 0xffff;
+
+	return clk;
+}
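+
+/*
+ * Illustrative reading of the fixed-point arithmetic above (an
+ * interpretation, not part of the change): the computed values appear to
+ * satisfy
+ *
+ *     clk ~= refclk * (N + (fN + 4096) / 8192) / (M * P)
+ *
+ * with fN interpreted as a signed 16-bit offset around one half, P the post
+ * divider clamped to the PLL limits, and M = ceil(refclk / vco1.max_inputfreq).
+ */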
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index cfabeb9..b4e4a3b 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -264,32 +264,40 @@
 int
 nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
 {
-	uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
-	struct nouveau_pll_vals pll;
-	struct pll_lims limits;
+	uint32_t reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
+	struct pll_lims pll;
 	uint32_t reg1, reg2;
-	int ret;
+	int ret, N1, M1, N2, M2, P;
 
-	ret = get_pll_limits(dev, pll_reg, &limits);
+	ret = get_pll_limits(dev, reg, &pll);
 	if (ret)
 		return ret;
 
-	ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll);
-	if (ret <= 0)
-		return ret;
+	if (pll.vco2.maxfreq) {
+		ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P);
+		if (ret <= 0)
+			return 0;
 
-	if (limits.vco2.maxfreq) {
-		reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00;
-		reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00;
-		nv_wr32(dev, pll_reg, 0x10000611);
-		nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1);
-		nv_wr32(dev, pll_reg + 8,
-			reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2);
+		NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
+			 pclk, ret, N1, M1, N2, M2, P);
+
+		reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00;
+		reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00;
+		nv_wr32(dev, reg, 0x10000611);
+		nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1);
+		nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
 	} else {
-		reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000;
-		nv_wr32(dev, pll_reg, 0x50000610);
-		nv_wr32(dev, pll_reg + 4, reg1 |
-			(pll.log2P << 16) | (pll.M1 << 8) | pll.N1);
+		ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
+		if (ret <= 0)
+			return 0;
+
+		NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
+			 pclk, ret, N1, N2, M1, P);
+
+		reg1 = nv_rd32(dev, reg + 4) & 0xffc00000;
+		nv_wr32(dev, reg, 0x50000610);
+		nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
+		nv_wr32(dev, reg + 8, N2);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 649db4c..580a5d1 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -29,6 +29,7 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
 #include "drm_crtc_helper.h"
 
 static void
@@ -783,6 +784,37 @@
 }
 
 static void
+nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb)
+{
+	int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+	struct drm_encoder *encoder;
+	uint32_t tmp, unk0 = 0, unk1 = 0;
+
+	if (dcb->type != OUTPUT_DP)
+		return;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+		if (nv_encoder->dcb == dcb) {
+			unk0 = nv_encoder->dp.unk0;
+			unk1 = nv_encoder->dp.unk1;
+			break;
+		}
+	}
+
+	if (unk0 || unk1) {
+		tmp  = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+		tmp &= 0xfffffe03;
+		nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0);
+
+		tmp  = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
+		tmp &= 0xfef080c0;
+		nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1);
+	}
+}
+
+static void
 nv50_display_unk20_handler(struct drm_device *dev)
 {
 	struct dcb_entry *dcbent;
@@ -805,6 +837,8 @@
 
 	nouveau_bios_run_display_table(dev, dcbent, script, pclk);
 
+	nv50_display_unk20_dp_hack(dev, dcbent);
+
 	tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
 	tmp &= ~0x000000f;
 	nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);
@@ -945,6 +979,8 @@
 	nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
 	if (dev_priv->chipset >= 0x90)
 		nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
+
+	drm_helper_hpd_irq_event(dev);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index a8c70e7..6bf025c 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -6,8 +6,8 @@
 void
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
-	struct nouveau_fbcon_par *par = info->par;
-	struct drm_device *dev = par->dev;
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
 
@@ -49,8 +49,8 @@
 void
 nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
-	struct nouveau_fbcon_par *par = info->par;
-	struct drm_device *dev = par->dev;
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
 
@@ -84,8 +84,8 @@
 void
 nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
-	struct nouveau_fbcon_par *par = info->par;
-	struct drm_device *dev = par->dev;
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
 	uint32_t width, dwords, *data = (uint32_t *)image->data;
@@ -152,8 +152,8 @@
 int
 nv50_fbcon_accel_init(struct fb_info *info)
 {
-	struct nouveau_fbcon_par *par = info->par;
-	struct drm_device *dev = par->dev;
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
 	struct nouveau_gpuobj *eng2d = NULL;
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 0c68698..b11eaf9 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -321,18 +321,23 @@
 	encoder->possible_clones = 0;
 
 	if (nv_encoder->dcb->type == OUTPUT_DP) {
-		uint32_t mc, or = nv_encoder->or;
+		int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
+		uint32_t tmp;
 
 		if (dev_priv->chipset < 0x90 ||
 		    dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
-			mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
+			tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
 		else
-			mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
+			tmp = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
 
-		switch ((mc & 0x00000f00) >> 8) {
+		switch ((tmp & 0x00000f00) >> 8) {
 		case 8:
 		case 9:
-			nv_encoder->dp.mc_unknown = (mc & 0x000f0000) >> 16;
+			nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16;
+			tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+			nv_encoder->dp.unk0 = tmp & 0x000001fc;
+			tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
+			nv_encoder->dp.unk1 = tmp & 0x010f7f3f;
 			break;
 		default:
 			break;
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 5319d9e..1bc72c3 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -5742,6 +5742,9 @@
 #define ATOM_PP_THERMALCONTROLLER_RV6xx     7
 #define ATOM_PP_THERMALCONTROLLER_RV770     8
 #define ATOM_PP_THERMALCONTROLLER_ADT7473   9
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO     11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    // ADT7473 Fan Control + Internal Thermal Controller
 
 typedef struct _ATOM_PPLIB_STATE
 {
@@ -5749,6 +5752,26 @@
     UCHAR ucClockStateIndices[1]; // variable-sized
 } ATOM_PPLIB_STATE;
 
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+    UCHAR   ucFanTableFormat;                // Change this if the table format changes or version changes so that the other fields are not the same.
+    UCHAR   ucTHyst;                         // Temperature hysteresis. Integer.
+    USHORT  usTMin;                          // The temperature, in units of 0.01 degrees Celsius, below which we just run at a minimal PWM.
+    USHORT  usTMed;                          // The middle temperature where we change slopes.
+    USHORT  usTHigh;                         // The high point above TMed for adjusting the second slope.
+    USHORT  usPWMMin;                        // The minimum PWM value in percent (0.01% increments).
+    USHORT  usPWMMed;                        // The PWM value (in percent) at TMed.
+    USHORT  usPWMHigh;                       // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+    USHORT  usSize;
+    ULONG   ulMaxEngineClock;   // For Overdrive.
+    ULONG   ulMaxMemoryClock;   // For Overdrive.
+    // Add extra system parameters here, always adjust size to include all fields.
+} ATOM_PPLIB_EXTENDEDHEADER;
+
 //// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
 #define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
 #define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
@@ -5762,6 +5785,12 @@
 #define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
 #define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
 #define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000              // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000   // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000                   // Whether the driver controls VDDCI independently of VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000               // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO          0x00020000               // Whether the driver supports the BACO state.
 
 typedef struct _ATOM_PPLIB_POWERPLAYTABLE
 {
@@ -5797,6 +5826,21 @@
 
 } ATOM_PPLIB_POWERPLAYTABLE;
 
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+    ATOM_PPLIB_POWERPLAYTABLE basicTable;
+    UCHAR   ucNumCustomThermalPolicy;
+    USHORT  usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+    ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+    USHORT                     usFormatID;                      // To be used ONLY by PPGen.
+    USHORT                     usFanTableOffset;
+    USHORT                     usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
 //// ATOM_PPLIB_NONCLOCK_INFO::usClassification
 #define ATOM_PPLIB_CLASSIFICATION_UI_MASK          0x0007
 #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT         0
@@ -5816,7 +5860,9 @@
 #define ATOM_PPLIB_CLASSIFICATION_UVDSTATE               0x0400
 #define ATOM_PPLIB_CLASSIFICATION_3DLOW                  0x0800
 #define ATOM_PPLIB_CLASSIFICATION_ACPI                   0x1000
-// remaining 3 bits are reserved
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE               0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE                0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE                0x8000
 
 //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
 #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY           0x00000001
@@ -5840,9 +5886,15 @@
 
 #define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING        0x00001000
 #define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS  0x00002000
+#define ATOM_PPLIB_DISALLOW_ON_DC                        0x00004000
 #define ATOM_PPLIB_ENABLE_VARIBRIGHT                     0x00008000
 
-#define ATOM_PPLIB_DISALLOW_ON_DC                       0x00004000
+//memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF               0x000010000
+
+// M3 Arb: 2 bits, currently 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK                       0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT                      17
 
 // Contained in an array starting at the offset
 // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
@@ -5860,6 +5912,9 @@
 // Contained in an array starting at the offset
 // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
 // referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+#define ATOM_PPLIB_NONCLOCKINFO_VER1      12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2      24
+
 typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
 {
       USHORT usEngineClockLow;
@@ -5882,6 +5937,23 @@
 #define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE    4
 #define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF    8
 #define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF    16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER         32   // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR  ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR  ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usVDDCI;
+      USHORT usUnused;
+
+      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
 
 typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
 
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a87990b..03dd6c4 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -26,7 +26,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
-#include "radeon_fixed.h"
+#include <drm/drm_fixed.h>
 #include "radeon.h"
 #include "atom.h"
 #include "atom-bits.h"
@@ -245,25 +245,27 @@
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
+		radeon_crtc->enabled = true;
+		/* adjust pm to dpms changes BEFORE enabling crtcs */
+		radeon_pm_compute_clocks(rdev);
 		atombios_enable_crtc(crtc, ATOM_ENABLE);
 		if (ASIC_IS_DCE3(rdev))
 			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
 		atombios_blank_crtc(crtc, ATOM_DISABLE);
-		/* XXX re-enable when interrupt support is added */
-		if (!ASIC_IS_DCE4(rdev))
-			drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
 		radeon_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		/* XXX re-enable when interrupt support is added */
-		if (!ASIC_IS_DCE4(rdev))
-			drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
 		atombios_blank_crtc(crtc, ATOM_ENABLE);
 		if (ASIC_IS_DCE3(rdev))
 			atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
 		atombios_enable_crtc(crtc, ATOM_DISABLE);
+		radeon_crtc->enabled = false;
+		/* adjust pm to dpms changes AFTER disabling crtcs */
+		radeon_pm_compute_clocks(rdev);
 		break;
 	}
 }
@@ -1160,6 +1162,12 @@
 				     struct drm_display_mode *mode,
 				     struct drm_display_mode *adjusted_mode)
 {
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* adjust pm to upcoming mode change */
+	radeon_pm_compute_clocks(rdev);
+
 	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
 		return false;
 	return true;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 28b31c6..abffb14 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -351,7 +351,7 @@
 	args.v1.ucChannelID = chan->rec.i2c_id;
 	args.v1.ucDelay = delay / 10;
 	if (ASIC_IS_DCE4(rdev))
-		args.v2.ucHPD_ID = chan->rec.hpd_id;
+		args.v2.ucHPD_ID = chan->rec.hpd;
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e8f447e..8c8e4d3 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -28,40 +28,236 @@
 #include "radeon.h"
 #include "radeon_asic.h"
 #include "radeon_drm.h"
-#include "rv770d.h"
+#include "evergreend.h"
 #include "atom.h"
 #include "avivod.h"
 #include "evergreen_reg.h"
 
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+
 static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 
+void evergreen_pm_misc(struct radeon_device *rdev)
+{
+
+}
+
+void evergreen_pm_prepare(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* disable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+void evergreen_pm_finish(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* enable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
 {
 	bool connected = false;
-	/* XXX */
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_2:
+		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_3:
+		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_4:
+		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_5:
+		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_6:
+		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	default:
+		break;
+	}
+
 	return connected;
 }
 
 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
 				enum radeon_hpd_id hpd)
 {
-	/* XXX */
+	u32 tmp;
+	bool connected = evergreen_hpd_sense(rdev, hpd);
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_3:
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_4:
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_5:
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_6:
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+		break;
+	default:
+		break;
+	}
 }
 
 void evergreen_hpd_init(struct radeon_device *rdev)
 {
-	/* XXX */
-}
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
 
-
-void evergreen_bandwidth_update(struct radeon_device *rdev)
-{
-	/* XXX */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(DC_HPD1_CONTROL, tmp);
+			rdev->irq.hpd[0] = true;
+			break;
+		case RADEON_HPD_2:
+			WREG32(DC_HPD2_CONTROL, tmp);
+			rdev->irq.hpd[1] = true;
+			break;
+		case RADEON_HPD_3:
+			WREG32(DC_HPD3_CONTROL, tmp);
+			rdev->irq.hpd[2] = true;
+			break;
+		case RADEON_HPD_4:
+			WREG32(DC_HPD4_CONTROL, tmp);
+			rdev->irq.hpd[3] = true;
+			break;
+		case RADEON_HPD_5:
+			WREG32(DC_HPD5_CONTROL, tmp);
+			rdev->irq.hpd[4] = true;
+			break;
+		case RADEON_HPD_6:
+			WREG32(DC_HPD6_CONTROL, tmp);
+			rdev->irq.hpd[5] = true;
+			break;
+		default:
+			break;
+		}
+	}
+	if (rdev->irq.installed)
+		evergreen_irq_set(rdev);
 }
 
 void evergreen_hpd_fini(struct radeon_device *rdev)
 {
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(DC_HPD1_CONTROL, 0);
+			rdev->irq.hpd[0] = false;
+			break;
+		case RADEON_HPD_2:
+			WREG32(DC_HPD2_CONTROL, 0);
+			rdev->irq.hpd[1] = false;
+			break;
+		case RADEON_HPD_3:
+			WREG32(DC_HPD3_CONTROL, 0);
+			rdev->irq.hpd[2] = false;
+			break;
+		case RADEON_HPD_4:
+			WREG32(DC_HPD4_CONTROL, 0);
+			rdev->irq.hpd[3] = false;
+			break;
+		case RADEON_HPD_5:
+			WREG32(DC_HPD5_CONTROL, 0);
+			rdev->irq.hpd[4] = false;
+			break;
+		case RADEON_HPD_6:
+			WREG32(DC_HPD6_CONTROL, 0);
+			rdev->irq.hpd[5] = false;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+void evergreen_bandwidth_update(struct radeon_device *rdev)
+{
 	/* XXX */
 }
 
@@ -83,10 +279,31 @@
 /*
  * GART
  */
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+		if (tmp == 2) {
+			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
+			return;
+		}
+		if (tmp) {
+			return;
+		}
+		udelay(1);
+	}
+}
+
 int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int r, i;
+	int r;
 
 	if (rdev->gart.table.vram.robj == NULL) {
 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -121,10 +338,9 @@
 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(rdev->dummy_page.addr >> 12));
-	for (i = 1; i < 7; i++)
-		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
 
-	r600_pcie_gart_tlb_flush(rdev);
+	evergreen_pcie_gart_tlb_flush(rdev);
 	rdev->gart.ready = true;
 	return 0;
 }
@@ -132,11 +348,11 @@
 void evergreen_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int i, r;
+	int r;
 
 	/* Disable all tables */
-	for (i = 0; i < 7; i++)
-		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
 
 	/* Setup L2 cache */
 	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
@@ -173,7 +389,6 @@
 void evergreen_agp_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int i;
 
 	/* Setup L2 cache */
 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -193,8 +408,8 @@
 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
-	for (i = 0; i < 7; i++)
-		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
 }
 
 static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -400,40 +615,656 @@
 	rv515_vga_render_disable(rdev);
 }
 
-#if 0
 /*
  * CP.
  */
-static void evergreen_cp_stop(struct radeon_device *rdev)
-{
-	/* XXX */
-}
-
 
 static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 {
-	/* XXX */
+	const __be32 *fw_data;
+	int i;
 
+	if (!rdev->me_fw || !rdev->pfp_fw)
+		return -EINVAL;
+
+	r700_cp_stop(rdev);
+	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
+		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
 	return 0;
 }
 
+int evergreen_cp_resume(struct radeon_device *rdev)
+{
+	u32 tmp;
+	u32 rb_bufsz;
+	int r;
+
+	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+				 SOFT_RESET_PA |
+				 SOFT_RESET_SH |
+				 SOFT_RESET_VGT |
+				 SOFT_RESET_SX));
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+	RREG32(GRBM_SOFT_RESET);
+
+	/* Set ring buffer size */
+	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB_CNTL, tmp);
+	WREG32(CP_SEM_WAIT_TIMER, 0x4);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+	WREG32(CP_RB_RPTR_WR, 0);
+	WREG32(CP_RB_WPTR, 0);
+	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
+	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+	mdelay(1);
+	WREG32(CP_RB_CNTL, tmp);
+
+	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+	rdev->cp.rptr = RREG32(CP_RB_RPTR);
+	rdev->cp.wptr = RREG32(CP_RB_WPTR);
+
+	r600_cp_start(rdev);
+	rdev->cp.ready = true;
+	r = radeon_ring_test(rdev);
+	if (r) {
+		rdev->cp.ready = false;
+		return r;
+	}
+	return 0;
+}
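+
+/*
+ * Quick arithmetic illustration for the CP_RB_CNTL setup above (the ring
+ * size is hypothetical): with a 1 MiB ring, rb_bufsz = drm_order(1048576 / 8)
+ * = drm_order(131072) = 17, and the value shifted into bit 8 is
+ * drm_order(RADEON_GPU_PAGE_SIZE / 8) = drm_order(512) = 9.
+ */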
 
 /*
  * Core functions
  */
-static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
+static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+						  u32 num_tile_pipes,
 						  u32 num_backends,
 						  u32 backend_disable_mask)
 {
 	u32 backend_map = 0;
+	u32 enabled_backends_mask = 0;
+	u32 enabled_backends_count = 0;
+	u32 cur_pipe;
+	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
+	u32 cur_backend = 0;
+	u32 i;
+	bool force_no_swizzle;
+
+	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
+		num_tile_pipes = EVERGREEN_MAX_PIPES;
+	if (num_tile_pipes < 1)
+		num_tile_pipes = 1;
+	if (num_backends > EVERGREEN_MAX_BACKENDS)
+		num_backends = EVERGREEN_MAX_BACKENDS;
+	if (num_backends < 1)
+		num_backends = 1;
+
+	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+		if (((backend_disable_mask >> i) & 1) == 0) {
+			enabled_backends_mask |= (1 << i);
+			++enabled_backends_count;
+		}
+		if (enabled_backends_count == num_backends)
+			break;
+	}
+
+	if (enabled_backends_count == 0) {
+		enabled_backends_mask = 1;
+		enabled_backends_count = 1;
+	}
+
+	if (enabled_backends_count != num_backends)
+		num_backends = enabled_backends_count;
+
+	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_REDWOOD:
+		force_no_swizzle = false;
+		break;
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+	case CHIP_JUNIPER:
+	default:
+		force_no_swizzle = true;
+		break;
+	}
+	if (force_no_swizzle) {
+		bool last_backend_enabled = false;
+
+		force_no_swizzle = false;
+		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+			if (((enabled_backends_mask >> i) & 1) == 1) {
+				if (last_backend_enabled)
+					force_no_swizzle = true;
+				last_backend_enabled = true;
+			} else
+				last_backend_enabled = false;
+		}
+	}
+
+	switch (num_tile_pipes) {
+	case 1:
+	case 3:
+	case 5:
+	case 7:
+		DRM_ERROR("odd number of pipes!\n");
+		break;
+	case 2:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 1;
+		break;
+	case 4:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 1;
+			swizzle_pipe[3] = 3;
+		}
+		break;
+	case 6:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+			swizzle_pipe[4] = 4;
+			swizzle_pipe[5] = 5;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 4;
+			swizzle_pipe[3] = 1;
+			swizzle_pipe[4] = 3;
+			swizzle_pipe[5] = 5;
+		}
+		break;
+	case 8:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+			swizzle_pipe[4] = 4;
+			swizzle_pipe[5] = 5;
+			swizzle_pipe[6] = 6;
+			swizzle_pipe[7] = 7;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 4;
+			swizzle_pipe[3] = 6;
+			swizzle_pipe[4] = 1;
+			swizzle_pipe[5] = 3;
+			swizzle_pipe[6] = 5;
+			swizzle_pipe[7] = 7;
+		}
+		break;
+	}
+
+	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+		while (((1 << cur_backend) & enabled_backends_mask) == 0)
+			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+
+		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
+
+		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+	}
 
 	return backend_map;
 }
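+
+/*
+ * Illustrative example (hypothetical configuration): with num_tile_pipes = 4,
+ * two enabled backends (mask 0x3) and swizzling allowed (e.g. a Redwood-like
+ * setup), swizzle_pipe ends up {0, 2, 1, 3}, the backends are handed out in
+ * the order 0, 1, 0, 1, and the function returns backend_map = 0x1100
+ * (one nibble per tile pipe, lowest nibble first).
+ */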
-#endif
 
 static void evergreen_gpu_init(struct radeon_device *rdev)
 {
-	/* XXX */
+	u32 cc_rb_backend_disable = 0;
+	u32 cc_gc_shader_pipe_config;
+	u32 gb_addr_config = 0;
+	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 gb_backend_map;
+	u32 grbm_gfx_index;
+	u32 sx_debug_1;
+	u32 smx_dc_ctl0;
+	u32 sq_config;
+	u32 sq_lds_resource_mgmt;
+	u32 sq_gpr_resource_mgmt_1;
+	u32 sq_gpr_resource_mgmt_2;
+	u32 sq_gpr_resource_mgmt_3;
+	u32 sq_thread_resource_mgmt;
+	u32 sq_thread_resource_mgmt_2;
+	u32 sq_stack_resource_mgmt_1;
+	u32 sq_stack_resource_mgmt_2;
+	u32 sq_stack_resource_mgmt_3;
+	u32 vgt_cache_invalidation;
+	u32 hdp_host_path_cntl;
+	int i, j, num_shader_engines, ps_thread_count;
+
+	switch (rdev->family) {
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		rdev->config.evergreen.num_ses = 2;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 8;
+		rdev->config.evergreen.max_simds = 10;
+		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
+	case CHIP_JUNIPER:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 10;
+		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
+	case CHIP_REDWOOD:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 5;
+		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
+	case CHIP_CEDAR:
+	default:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 192;
+		rdev->config.evergreen.max_gs_threads = 16;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 128;
+		rdev->config.evergreen.sx_max_export_pos_size = 32;
+		rdev->config.evergreen.sx_max_export_smx_size = 96;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 1;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
+
+	cc_gc_shader_pipe_config |=
+		INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
+				  & EVERGREEN_MAX_PIPES_MASK);
+	cc_gc_shader_pipe_config |=
+		INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
+			       & EVERGREEN_MAX_SIMDS_MASK);
+
+	cc_rb_backend_disable =
+		BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
+				& EVERGREEN_MAX_BACKENDS_MASK);
+
+	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	switch (rdev->config.evergreen.max_tile_pipes) {
+	case 1:
+	default:
+		gb_addr_config |= NUM_PIPES(0);
+		break;
+	case 2:
+		gb_addr_config |= NUM_PIPES(1);
+		break;
+	case 4:
+		gb_addr_config |= NUM_PIPES(2);
+		break;
+	case 8:
+		gb_addr_config |= NUM_PIPES(3);
+		break;
+	}
+
+	gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+	gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
+	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
+	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
+	gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
+	gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
+
+	if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
+		gb_addr_config |= ROW_SIZE(2);
+	else
+		gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
+
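+	/* On a couple of known device ids the harvested backend layout is
+	 * encoded in efuse straps; translate those into a backend map and
+	 * fall back to the computed default otherwise. */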
+	if (rdev->ddev->pdev->device == 0x689e) {
+		u32 efuse_straps_4;
+		u32 efuse_straps_3;
+		u8 efuse_box_bit_131_124;
+
+		WREG32(RCU_IND_INDEX, 0x204);
+		efuse_straps_4 = RREG32(RCU_IND_DATA);
+		WREG32(RCU_IND_INDEX, 0x203);
+		efuse_straps_3 = RREG32(RCU_IND_DATA);
+		efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
+
+		switch(efuse_box_bit_131_124) {
+		case 0x00:
+			gb_backend_map = 0x76543210;
+			break;
+		case 0x55:
+			gb_backend_map = 0x77553311;
+			break;
+		case 0x56:
+			gb_backend_map = 0x77553300;
+			break;
+		case 0x59:
+			gb_backend_map = 0x77552211;
+			break;
+		case 0x66:
+			gb_backend_map = 0x77443300;
+			break;
+		case 0x99:
+			gb_backend_map = 0x66552211;
+			break;
+		case 0x5a:
+			gb_backend_map = 0x77552200;
+			break;
+		case 0xaa:
+			gb_backend_map = 0x66442200;
+			break;
+		case 0x95:
+			gb_backend_map = 0x66553311;
+			break;
+		default:
+			DRM_ERROR("bad backend map, using default\n");
+			gb_backend_map =
+				evergreen_get_tile_pipe_to_backend_map(rdev,
+								       rdev->config.evergreen.max_tile_pipes,
+								       rdev->config.evergreen.max_backends,
+								       ((EVERGREEN_MAX_BACKENDS_MASK <<
+								   rdev->config.evergreen.max_backends) &
+									EVERGREEN_MAX_BACKENDS_MASK));
+			break;
+		}
+	} else if (rdev->ddev->pdev->device == 0x68b9) {
+		u32 efuse_straps_3;
+		u8 efuse_box_bit_127_124;
+
+		WREG32(RCU_IND_INDEX, 0x203);
+		efuse_straps_3 = RREG32(RCU_IND_DATA);
+		efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
+
+		switch(efuse_box_bit_127_124) {
+		case 0x0:
+			gb_backend_map = 0x00003210;
+			break;
+		case 0x5:
+		case 0x6:
+		case 0x9:
+		case 0xa:
+			gb_backend_map = 0x00003311;
+			break;
+		default:
+			DRM_ERROR("bad backend map, using default\n");
+			gb_backend_map =
+				evergreen_get_tile_pipe_to_backend_map(rdev,
+								       rdev->config.evergreen.max_tile_pipes,
+								       rdev->config.evergreen.max_backends,
+								       ((EVERGREEN_MAX_BACKENDS_MASK <<
+								   rdev->config.evergreen.max_backends) &
+									EVERGREEN_MAX_BACKENDS_MASK));
+			break;
+		}
+	} else
+		gb_backend_map =
+			evergreen_get_tile_pipe_to_backend_map(rdev,
+							       rdev->config.evergreen.max_tile_pipes,
+							       rdev->config.evergreen.max_backends,
+							       ((EVERGREEN_MAX_BACKENDS_MASK <<
+								 rdev->config.evergreen.max_backends) &
+								EVERGREEN_MAX_BACKENDS_MASK));
+
+	WREG32(GB_BACKEND_MAP, gb_backend_map);
+	WREG32(GB_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+
+	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
+	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
+
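+	/* Program the backend/SIMD disable state for each shader engine in
+	 * turn, then switch GRBM back to broadcast writes. */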
+	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
+		u32 rb = cc_rb_backend_disable | (0xf0 << 16);
+		u32 sp = cc_gc_shader_pipe_config;
+		u32 gfx = grbm_gfx_index | SE_INDEX(i);
+
+		if (i == num_shader_engines) {
+			rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
+			sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
+		}
+
+		WREG32(GRBM_GFX_INDEX, gfx);
+		WREG32(RLC_GFX_INDEX, gfx);
+
+		WREG32(CC_RB_BACKEND_DISABLE, rb);
+		WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
+		WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
+		WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
+	}
+
+	grbm_gfx_index |= SE_BROADCAST_WRITES;
+	WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
+	WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+
+	WREG32(CGTS_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+	/* set HW defaults for 3D engine */
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+				     ROQ_IB2_START(0x2b)));
+
+	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
+			     SYNC_GRADIENT |
+			     SYNC_WALKER |
+			     SYNC_ALIGNER));
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
+					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
+					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+	WREG32(SPI_CONFIG_CNTL, 0);
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+	WREG32(CP_PERFMON_CNTL, 0);
+
+	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
+				  FETCH_FIFO_HIWATER(0x4) |
+				  DONE_FIFO_HIWATER(0xe0) |
+				  ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+	sq_config = RREG32(SQ_CONFIG);
+	sq_config &= ~(PS_PRIO(3) |
+		       VS_PRIO(3) |
+		       GS_PRIO(3) |
+		       ES_PRIO(3));
+	sq_config |= (VC_ENABLE |
+		      EXPORT_SRC_C |
+		      PS_PRIO(0) |
+		      VS_PRIO(1) |
+		      GS_PRIO(2) |
+		      ES_PRIO(3));
+
+	if (rdev->family == CHIP_CEDAR)
+		/* no vertex cache */
+		sq_config &= ~VC_ENABLE;
+
+	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
+
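+	/* Split the GPR pool, less the 4 * 2 clause temporaries, between the
+	 * shader types in a 12:6:4:4:3:3 ratio. */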
+	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
+	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
+	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
+	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+
+	if (rdev->family == CHIP_CEDAR)
+		ps_thread_count = 96;
+	else
+		ps_thread_count = 128;
+
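+	/* Give PS its fixed thread budget and share the remaining threads
+	 * between the other shader types in multiples of 8. */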
+	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
+	sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+	sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+	sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+	sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+
+	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+
+	WREG32(SQ_CONFIG, sq_config);
+	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
+	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
+	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
+	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
+	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
+	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
+	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
+
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	if (rdev->family == CHIP_CEDAR)
+		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
+	else
+		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
+	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
+
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+	WREG32(CB_PERF_CTR0_SEL_0, 0);
+	WREG32(CB_PERF_CTR0_SEL_1, 0);
+	WREG32(CB_PERF_CTR1_SEL_0, 0);
+	WREG32(CB_PERF_CTR1_SEL_1, 0);
+	WREG32(CB_PERF_CTR2_SEL_0, 0);
+	WREG32(CB_PERF_CTR2_SEL_1, 0);
+	WREG32(CB_PERF_CTR3_SEL_0, 0);
+	WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+	udelay(50);
+
 }
 
 int evergreen_mc_init(struct radeon_device *rdev)
@@ -476,26 +1307,627 @@
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	/* FIXME remove this once we support unmappable VRAM */
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-		rdev->mc.real_vram_size = rdev->mc.aper_size;
-	}
 	r600_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
 
 	return 0;
 }
 
-int evergreen_gpu_reset(struct radeon_device *rdev)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
 {
 	/* FIXME: implement for evergreen */
+	return false;
+}
+
+static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	u32 srbm_reset = 0;
+	u32 grbm_reset = 0;
+
+	dev_info(rdev->dev, "GPU softreset\n");
+	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+		RREG32(GRBM_STATUS));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+		RREG32(GRBM_STATUS_SE0));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+		RREG32(GRBM_STATUS_SE1));
+	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+		RREG32(SRBM_STATUS));
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+	/* reset all the gfx blocks */
+	grbm_reset = (SOFT_RESET_CP |
+		      SOFT_RESET_CB |
+		      SOFT_RESET_DB |
+		      SOFT_RESET_PA |
+		      SOFT_RESET_SC |
+		      SOFT_RESET_SPI |
+		      SOFT_RESET_SH |
+		      SOFT_RESET_SX |
+		      SOFT_RESET_TC |
+		      SOFT_RESET_TA |
+		      SOFT_RESET_VC |
+		      SOFT_RESET_VGT);
+
+	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+	WREG32(GRBM_SOFT_RESET, grbm_reset);
+	(void)RREG32(GRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(GRBM_SOFT_RESET, 0);
+	(void)RREG32(GRBM_SOFT_RESET);
+
+	/* reset all the system blocks */
+	srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
+
+	dev_info(rdev->dev, "  SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
+	WREG32(SRBM_SOFT_RESET, srbm_reset);
+	(void)RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+	(void)RREG32(SRBM_SOFT_RESET);
+	/* Wait a little for things to settle down */
+	udelay(50);
+	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+		RREG32(GRBM_STATUS));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+		RREG32(GRBM_STATUS_SE0));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+		RREG32(GRBM_STATUS_SE1));
+	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+		RREG32(SRBM_STATUS));
+	/* After reset we need to reinit the asic as the GPU often ends up in
+	 * an incoherent state.
+	 */
+	atom_asic_init(rdev->mode_info.atom_context);
+	evergreen_mc_resume(rdev, &save);
 	return 0;
 }
 
+int evergreen_asic_reset(struct radeon_device *rdev)
+{
+	return evergreen_gpu_soft_reset(rdev);
+}
+
+/* Interrupts */
+
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+	switch (crtc) {
+	case 0:
+		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
+	case 1:
+		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
+	case 2:
+		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
+	case 3:
+		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
+	case 4:
+		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
+	case 5:
+		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
+	default:
+		return 0;
+	}
+}
+
+void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32(CP_INT_CNTL, 0);
+	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+
+	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD1_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD2_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD3_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD4_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD5_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD6_INT_CONTROL, tmp);
+
+}
+
+int evergreen_irq_set(struct radeon_device *rdev)
+{
+	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+	u32 grbm_int_cntl = 0;
+
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		return -EINVAL;
+	}
+	/* don't enable anything if the ih is disabled */
+	if (!rdev->ih.enabled) {
+		r600_disable_interrupts(rdev);
+		/* force the active interrupt state to all disabled */
+		evergreen_disable_interrupt_state(rdev);
+		return 0;
+	}
+
+	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+
+	if (rdev->irq.sw_int) {
+		DRM_DEBUG("evergreen_irq_set: sw int\n");
+		cp_int_cntl |= RB_INT_ENABLE;
+	}
+	if (rdev->irq.crtc_vblank_int[0]) {
+		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
+		crtc1 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[1]) {
+		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
+		crtc2 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[2]) {
+		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
+		crtc3 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[3]) {
+		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
+		crtc4 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[4]) {
+		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
+		crtc5 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[5]) {
+		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
+		crtc6 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.hpd[0]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 1\n");
+		hpd1 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[1]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 2\n");
+		hpd2 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[2]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 3\n");
+		hpd3 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[3]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 4\n");
+		hpd4 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[4]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 5\n");
+		hpd5 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[5]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
+		hpd6 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.gui_idle) {
+		DRM_DEBUG("gui idle\n");
+		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
+	}
+
+	WREG32(CP_INT_CNTL, cp_int_cntl);
+	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+	WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+	WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+	WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+	WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+
+	WREG32(DC_HPD1_INT_CONTROL, hpd1);
+	WREG32(DC_HPD2_INT_CONTROL, hpd2);
+	WREG32(DC_HPD3_INT_CONTROL, hpd3);
+	WREG32(DC_HPD4_INT_CONTROL, hpd4);
+	WREG32(DC_HPD5_INT_CONTROL, hpd5);
+	WREG32(DC_HPD6_INT_CONTROL, hpd6);
+
+	return 0;
+}
+
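+/* Latch the display interrupt status registers and acknowledge any pending
+ * vblank, vline and hotplug interrupts. */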
+static inline void evergreen_irq_ack(struct radeon_device *rdev,
+				     u32 *disp_int,
+				     u32 *disp_int_cont,
+				     u32 *disp_int_cont2,
+				     u32 *disp_int_cont3,
+				     u32 *disp_int_cont4,
+				     u32 *disp_int_cont5)
+{
+	u32 tmp;
+
+	*disp_int = RREG32(DISP_INTERRUPT_STATUS);
+	*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+	*disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+	*disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+	*disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+	*disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+
+	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+	if (*disp_int & LB_D1_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+
+	if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+	if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+	if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+	if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+
+	if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+	if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+
+	if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+	if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+
+	if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+	if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+
+	if (*disp_int & DC_HPD1_INTERRUPT) {
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+	}
+	if (*disp_int_cont & DC_HPD2_INTERRUPT) {
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+	}
+	if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+	}
+	if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+	}
+	if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+	}
+	if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+	}
+}
+
+void evergreen_irq_disable(struct radeon_device *rdev)
+{
+	u32 disp_int, disp_int_cont, disp_int_cont2;
+	u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
+
+	r600_disable_interrupts(rdev);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
+			  &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+	evergreen_disable_interrupt_state(rdev);
+}
+
+static void evergreen_irq_suspend(struct radeon_device *rdev)
+{
+	evergreen_irq_disable(rdev);
+	r600_rlc_stop(rdev);
+}
+
+static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+{
+	u32 wptr, tmp;
+
+	/* XXX use writeback */
+	wptr = RREG32(IH_RB_WPTR);
+
+	if (wptr & RB_OVERFLOW) {
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last vector that was not overwritten (wptr + 16).
+		 * Hopefully this allows us to catch up.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+		tmp = RREG32(IH_RB_CNTL);
+		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+		WREG32(IH_RB_CNTL, tmp);
+	}
+	return (wptr & rdev->ih.ptr_mask);
+}
+
+int evergreen_irq_process(struct radeon_device *rdev)
+{
+	u32 wptr = evergreen_get_ih_wptr(rdev);
+	u32 rptr = rdev->ih.rptr;
+	u32 src_id, src_data;
+	u32 ring_index;
+	u32 disp_int, disp_int_cont, disp_int_cont2;
+	u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
+	unsigned long flags;
+	bool queue_hotplug = false;
+
+	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+	if (!rdev->ih.enabled)
+		return IRQ_NONE;
+
+	spin_lock_irqsave(&rdev->ih.lock, flags);
+
+	if (rptr == wptr) {
+		spin_unlock_irqrestore(&rdev->ih.lock, flags);
+		return IRQ_NONE;
+	}
+	if (rdev->shutdown) {
+		spin_unlock_irqrestore(&rdev->ih.lock, flags);
+		return IRQ_NONE;
+	}
+
+restart_ih:
+	/* display interrupts */
+	evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
+			  &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+
+	rdev->ih.wptr = wptr;
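+	/* Each IH ring entry is 16 bytes: dword 0 carries the source id,
+	 * dword 1 the source data. */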
+	while (rptr != wptr) {
+		/* wptr/rptr are in bytes! */
+		ring_index = rptr / 4;
+		src_id = rdev->ih.ring[ring_index] & 0xff;
+		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+
+		switch (src_id) {
+		case 1: /* D1 vblank/vline */
+			switch (src_data) {
+			case 0: /* D1 vblank */
+				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
+					drm_handle_vblank(rdev->ddev, 0);
+					wake_up(&rdev->irq.vblank_queue);
+					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D1 vblank\n");
+				}
+				break;
+			case 1: /* D1 vline */
+				if (disp_int & LB_D1_VLINE_INTERRUPT) {
+					disp_int &= ~LB_D1_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D1 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 2: /* D2 vblank/vline */
+			switch (src_data) {
+			case 0: /* D2 vblank */
+				if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+					drm_handle_vblank(rdev->ddev, 1);
+					wake_up(&rdev->irq.vblank_queue);
+					disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D2 vblank\n");
+				}
+				break;
+			case 1: /* D2 vline */
+				if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+					disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D2 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 3: /* D3 vblank/vline */
+			switch (src_data) {
+			case 0: /* D3 vblank */
+				if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+					drm_handle_vblank(rdev->ddev, 2);
+					wake_up(&rdev->irq.vblank_queue);
+					disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D3 vblank\n");
+				}
+				break;
+			case 1: /* D3 vline */
+				if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+					disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D3 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 4: /* D4 vblank/vline */
+			switch (src_data) {
+			case 0: /* D4 vblank */
+				if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+					drm_handle_vblank(rdev->ddev, 3);
+					wake_up(&rdev->irq.vblank_queue);
+					disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D4 vblank\n");
+				}
+				break;
+			case 1: /* D4 vline */
+				if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+					disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D4 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 5: /* D5 vblank/vline */
+			switch (src_data) {
+			case 0: /* D5 vblank */
+				if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+					drm_handle_vblank(rdev->ddev, 4);
+					wake_up(&rdev->irq.vblank_queue);
+					disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D5 vblank\n");
+				}
+				break;
+			case 1: /* D5 vline */
+				if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+					disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D5 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 6: /* D6 vblank/vline */
+			switch (src_data) {
+			case 0: /* D6 vblank */
+				if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+					drm_handle_vblank(rdev->ddev, 5);
+					wake_up(&rdev->irq.vblank_queue);
+					disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D6 vblank\n");
+				}
+				break;
+			case 1: /* D6 vline */
+				if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+					disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D6 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 42: /* HPD hotplug */
+			switch (src_data) {
+			case 0:
+				if (disp_int & DC_HPD1_INTERRUPT) {
+					disp_int &= ~DC_HPD1_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD1\n");
+				}
+				break;
+			case 1:
+				if (disp_int_cont & DC_HPD2_INTERRUPT) {
+					disp_int_cont &= ~DC_HPD2_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD2\n");
+				}
+				break;
+			case 2:
+				if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
+					disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD3\n");
+				}
+				break;
+			case 3:
+				if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
+					disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD4\n");
+				}
+				break;
+			case 4:
+				if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
+					disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD5\n");
+				}
+				break;
+			case 5:
+				if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
+					disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD6\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 176: /* CP_INT in ring buffer */
+		case 177: /* CP_INT in IB1 */
+		case 178: /* CP_INT in IB2 */
+			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev);
+			break;
+		case 181: /* CP EOP event */
+			DRM_DEBUG("IH: CP EOP\n");
+			break;
+		case 233: /* GUI IDLE */
+			DRM_DEBUG("IH: GUI idle\n");
+			rdev->pm.gui_idle = true;
+			wake_up(&rdev->irq.idle_queue);
+			break;
+		default:
+			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+			break;
+		}
+
+		/* wptr/rptr are in bytes! */
+		rptr += 16;
+		rptr &= rdev->ih.ptr_mask;
+	}
+	/* make sure wptr hasn't changed while processing */
+	wptr = evergreen_get_ih_wptr(rdev);
+	if (wptr != rdev->ih.wptr)
+		goto restart_ih;
+	if (queue_hotplug)
+		queue_work(rdev->wq, &rdev->hotplug_work);
+	rdev->ih.rptr = rptr;
+	WREG32(IH_RB_RPTR, rdev->ih.rptr);
+	spin_unlock_irqrestore(&rdev->ih.lock, flags);
+	return IRQ_HANDLED;
+}
+
 static int evergreen_startup(struct radeon_device *rdev)
 {
-#if 0
 	int r;
 
 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
@@ -505,17 +1937,15 @@
 			return r;
 		}
 	}
-#endif
+
 	evergreen_mc_program(rdev);
-#if 0
 	if (rdev->flags & RADEON_IS_AGP) {
-		evergreem_agp_enable(rdev);
+		evergreen_agp_enable(rdev);
 	} else {
 		r = evergreen_pcie_gart_enable(rdev);
 		if (r)
 			return r;
 	}
-#endif
 	evergreen_gpu_init(rdev);
 #if 0
 	if (!rdev->r600_blit.shader_obj) {
@@ -536,6 +1966,7 @@
 		DRM_ERROR("failed to pin blit object %d\n", r);
 		return r;
 	}
+#endif
 
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
@@ -544,7 +1975,7 @@
 		radeon_irq_kms_fini(rdev);
 		return r;
 	}
-	r600_irq_set(rdev);
+	evergreen_irq_set(rdev);
 
 	r = radeon_ring_init(rdev, rdev->cp.ring_size);
 	if (r)
@@ -552,12 +1983,12 @@
 	r = evergreen_cp_load_microcode(rdev);
 	if (r)
 		return r;
-	r = r600_cp_resume(rdev);
+	r = evergreen_cp_resume(rdev);
 	if (r)
 		return r;
 	/* write back buffer are not vital so don't worry about failure */
 	r600_wb_enable(rdev);
-#endif
+
 	return 0;
 }
 
@@ -582,13 +2013,13 @@
 		DRM_ERROR("r600 startup failed on resume\n");
 		return r;
 	}
-#if 0
+
 	r = r600_ib_test(rdev);
 	if (r) {
 		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
 		return r;
 	}
-#endif
+
 	return r;
 
 }
@@ -597,12 +2028,14 @@
 {
 #if 0
 	int r;
-
+#endif
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
 	rdev->cp.ready = false;
+	evergreen_irq_suspend(rdev);
 	r600_wb_disable(rdev);
 	evergreen_pcie_gart_disable(rdev);
+#if 0
 	/* unpin shaders bo */
 	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
 	if (likely(r == 0)) {
@@ -682,8 +2115,6 @@
 	r = radeon_clocks_init(rdev);
 	if (r)
 		return r;
-	/* Initialize power management */
-	radeon_pm_init(rdev);
 	/* Fence driver */
 	r = radeon_fence_driver_init(rdev);
 	if (r)
@@ -702,7 +2133,7 @@
 	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
-#if 0
+
 	r = radeon_irq_kms_init(rdev);
 	if (r)
 		return r;
@@ -716,14 +2147,16 @@
 	r = r600_pcie_gart_init(rdev);
 	if (r)
 		return r;
-#endif
+
 	rdev->accel_working = false;
 	r = evergreen_startup(rdev);
 	if (r) {
-		evergreen_suspend(rdev);
-		/*r600_wb_fini(rdev);*/
-		/*radeon_ring_fini(rdev);*/
-		/*evergreen_pcie_gart_fini(rdev);*/
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r700_cp_fini(rdev);
+		r600_wb_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		evergreen_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
 	if (rdev->accel_working) {
@@ -743,16 +2176,12 @@
 
 void evergreen_fini(struct radeon_device *rdev)
 {
-	radeon_pm_fini(rdev);
-	evergreen_suspend(rdev);
-#if 0
-	r600_blit_fini(rdev);
+	/*r600_blit_fini(rdev);*/
+	r700_cp_fini(rdev);
+	r600_wb_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_irq_kms_fini(rdev);
-	radeon_ring_fini(rdev);
-	r600_wb_fini(rdev);
 	evergreen_pcie_gart_fini(rdev);
-#endif
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_clocks_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index f7c7c96..af86af8 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -164,8 +164,12 @@
 #define EVERGREEN_CRTC5_REGISTER_OFFSET                 (0x129f0 - 0x6df0)
 
 /* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END                0x6e34
 #define EVERGREEN_CRTC_CONTROL                          0x6e70
 #       define EVERGREEN_CRTC_MASTER_EN                 (1 << 0)
+#       define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_STATUS                           0x6e8c
+#define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
 #define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4
 
 #define EVERGREEN_DC_GPIO_HPD_MASK                      0x64b0
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
new file mode 100644
index 0000000..93e9e17ad
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -0,0 +1,556 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef EVERGREEND_H
+#define EVERGREEND_H
+
+#define EVERGREEN_MAX_SH_GPRS           256
+#define EVERGREEN_MAX_TEMP_GPRS         16
+#define EVERGREEN_MAX_SH_THREADS        256
+#define EVERGREEN_MAX_SH_STACK_ENTRIES  4096
+#define EVERGREEN_MAX_FRC_EOV_CNT       16384
+#define EVERGREEN_MAX_BACKENDS          8
+#define EVERGREEN_MAX_BACKENDS_MASK     0xFF
+#define EVERGREEN_MAX_SIMDS             16
+#define EVERGREEN_MAX_SIMDS_MASK        0xFFFF
+#define EVERGREEN_MAX_PIPES             8
+#define EVERGREEN_MAX_PIPES_MASK        0xFF
+#define EVERGREEN_MAX_LDS_NUM           0xFFFF
+
+/* Registers */
+
+#define RCU_IND_INDEX           			0x100
+#define RCU_IND_DATA            			0x104
+
+#define GRBM_GFX_INDEX          			0x802C
+#define		INSTANCE_INDEX(x)			((x) << 0)
+#define		SE_INDEX(x)     			((x) << 16)
+#define		INSTANCE_BROADCAST_WRITES      		(1 << 30)
+#define		SE_BROADCAST_WRITES      		(1 << 31)
+#define RLC_GFX_INDEX           			0x3fC4
+#define CC_GC_SHADER_PIPE_CONFIG			0x8950
+#define		WRITE_DIS      				(1 << 0)
+#define CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)     			((x) << 16)
+#define GB_ADDR_CONFIG  				0x98F8
+#define		NUM_PIPES(x)				((x) << 0)
+#define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
+#define		BANK_INTERLEAVE_SIZE(x)			((x) << 8)
+#define		NUM_SHADER_ENGINES(x)			((x) << 12)
+#define		SHADER_ENGINE_TILE_SIZE(x)     		((x) << 16)
+#define		NUM_GPUS(x)     			((x) << 20)
+#define		MULTI_GPU_TILE_SIZE(x)     		((x) << 24)
+#define		ROW_SIZE(x)             		((x) << 28)
+#define GB_BACKEND_MAP  				0x98FC
+#define DMIF_ADDR_CONFIG  				0xBD4
+#define HDP_ADDR_CONFIG  				0x2F48
+
+#define	CC_SYS_RB_BACKEND_DISABLE			0x3F88
+#define	GC_USER_RB_BACKEND_DISABLE			0x9B7C
+
+#define	CGTS_SYS_TCC_DISABLE				0x3F90
+#define	CGTS_TCC_DISABLE				0x9148
+#define	CGTS_USER_SYS_TCC_DISABLE			0x3F94
+#define	CGTS_USER_TCC_DISABLE				0x914C
+
+#define	CONFIG_MEMSIZE					0x5428
+
+#define CP_ME_CNTL					0x86D8
+#define		CP_ME_HALT					(1 << 28)
+#define		CP_PFP_HALT					(1 << 26)
+#define	CP_ME_RAM_DATA					0xC160
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		STQ_SPLIT(x)					((x) << 0)
+#define	CP_PERFMON_CNTL					0x87FC
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_QUEUE_THRESHOLDS				0x8760
+#define		ROQ_IB1_START(x)				((x) << 0)
+#define		ROQ_IB2_START(x)				((x) << 8)
+#define	CP_RB_BASE					0xC100
+#define	CP_RB_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define	CP_RB_RPTR					0x8700
+#define	CP_RB_RPTR_ADDR					0xC10C
+#define	CP_RB_RPTR_ADDR_HI				0xC110
+#define	CP_RB_RPTR_WR					0xC108
+#define	CP_RB_WPTR					0xC114
+#define	CP_RB_WPTR_ADDR					0xC118
+#define	CP_RB_WPTR_ADDR_HI				0xC11C
+#define	CP_RB_WPTR_DELAY				0x8704
+#define	CP_SEM_WAIT_TIMER				0x85BC
+#define	CP_DEBUG					0xC1FC
+
+
+#define	GC_USER_SHADER_PIPE_CONFIG			0x8954
+#define		INACTIVE_QD_PIPES(x)				((x) << 8)
+#define		INACTIVE_QD_PIPES_MASK				0x0000FF00
+#define		INACTIVE_SIMDS(x)				((x) << 16)
+#define		INACTIVE_SIMDS_MASK				0x00FF0000
+
+#define	GRBM_CNTL					0x8000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1 << 0)
+#define		SOFT_RESET_CB					(1 << 1)
+#define		SOFT_RESET_DB					(1 << 3)
+#define		SOFT_RESET_PA					(1 << 5)
+#define		SOFT_RESET_SC					(1 << 6)
+#define		SOFT_RESET_SPI					(1 << 8)
+#define		SOFT_RESET_SH					(1 << 9)
+#define		SOFT_RESET_SX					(1 << 10)
+#define		SOFT_RESET_TC					(1 << 11)
+#define		SOFT_RESET_TA					(1 << 12)
+#define		SOFT_RESET_VC					(1 << 13)
+#define		SOFT_RESET_VGT					(1 << 14)
+
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000000F
+#define		SRBM_RQ_PENDING					(1 << 5)
+#define		CF_RQ_PENDING					(1 << 7)
+#define		PF_RQ_PENDING					(1 << 8)
+#define		GRBM_EE_BUSY					(1 << 10)
+#define		SX_CLEAN					(1 << 11)
+#define		DB_CLEAN					(1 << 12)
+#define		CB_CLEAN					(1 << 13)
+#define		TA_BUSY 					(1 << 14)
+#define		VGT_BUSY_NO_DMA					(1 << 16)
+#define		VGT_BUSY					(1 << 17)
+#define		SX_BUSY 					(1 << 20)
+#define		SH_BUSY 					(1 << 21)
+#define		SPI_BUSY					(1 << 22)
+#define		SC_BUSY 					(1 << 24)
+#define		PA_BUSY 					(1 << 25)
+#define		DB_BUSY 					(1 << 26)
+#define		CP_COHERENCY_BUSY      				(1 << 28)
+#define		CP_BUSY 					(1 << 29)
+#define		CB_BUSY 					(1 << 30)
+#define		GUI_ACTIVE					(1 << 31)
+#define	GRBM_STATUS_SE0					0x8014
+#define	GRBM_STATUS_SE1					0x8018
+#define		SE_SX_CLEAN					(1 << 0)
+#define		SE_DB_CLEAN					(1 << 1)
+#define		SE_CB_CLEAN					(1 << 2)
+#define		SE_TA_BUSY					(1 << 25)
+#define		SE_SX_BUSY					(1 << 26)
+#define		SE_SPI_BUSY					(1 << 27)
+#define		SE_SH_BUSY					(1 << 28)
+#define		SE_SC_BUSY					(1 << 29)
+#define		SE_DB_BUSY					(1 << 30)
+#define		SE_CB_BUSY					(1 << 31)
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+#define	HDP_TILING_CONFIG				0x2F3C
+
+#define MC_SHARED_CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x00003000
+
+#define	MC_ARB_RAMCFG					0x2760
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		BURSTLENGTH_SHIFT				9
+#define		BURSTLENGTH_MASK				0x00000200
+#define		CHANSIZE_OVERRIDE				(1 << 11)
+#define	MC_VM_AGP_TOP					0x2028
+#define	MC_VM_AGP_BOT					0x202C
+#define	MC_VM_AGP_BASE					0x2030
+#define	MC_VM_FB_LOCATION				0x2024
+#define	MC_VM_MB_L1_TLB0_CNTL				0x2234
+#define	MC_VM_MB_L1_TLB1_CNTL				0x2238
+#define	MC_VM_MB_L1_TLB2_CNTL				0x223C
+#define	MC_VM_MB_L1_TLB3_CNTL				0x2240
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		EFFECTIVE_L1_TLB_SIZE(x)			((x)<<15)
+#define		EFFECTIVE_L1_QUEUE_SIZE(x)			((x)<<18)
+#define	MC_VM_MD_L1_TLB0_CNTL				0x2654
+#define	MC_VM_MD_L1_TLB1_CNTL				0x2658
+#define	MC_VM_MD_L1_TLB2_CNTL				0x265C
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
+
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+#define PA_SC_AA_CONFIG					0x28C04
+#define PA_SC_CLIPRECT_RULE				0x2820C
+#define	PA_SC_EDGERULE					0x28230
+#define	PA_SC_FIFO_SIZE					0x8BCC
+#define		SC_PRIM_FIFO_SIZE(x)				((x) << 0)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 12)
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 20)
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x8B24
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
+#define PA_SC_LINE_STIPPLE				0x28A0C
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+
+#define	SMX_DC_CTL0					0xA020
+#define		USE_HASH_FUNCTION				(1 << 0)
+#define		NUMBER_OF_SETS(x)				((x) << 1)
+#define		FLUSH_ALL_ON_EVENT				(1 << 10)
+#define		STALL_ON_EVENT					(1 << 11)
+#define	SMX_EVENT_CTL					0xA02C
+#define		ES_FLUSH_CTL(x)					((x) << 0)
+#define		GS_FLUSH_CTL(x)					((x) << 3)
+#define		ACK_FLUSH_CTL(x)				((x) << 6)
+#define		SYNC_FLUSH_CTL					(1 << 8)
+
+#define	SPI_CONFIG_CNTL					0x9100
+#define		GPR_WRITE_PRIORITY(x)				((x) << 0)
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+#define	SPI_INPUT_Z					0x286D8
+#define	SPI_PS_IN_CONTROL_0				0x286CC
+#define		NUM_INTERP(x)					((x)<<0)
+#define		POSITION_ENA					(1<<8)
+#define		POSITION_CENTROID				(1<<9)
+#define		POSITION_ADDR(x)				((x)<<10)
+#define		PARAM_GEN(x)					((x)<<15)
+#define		PARAM_GEN_ADDR(x)				((x)<<19)
+#define		BARYC_SAMPLE_CNTL(x)				((x)<<26)
+#define		PERSP_GRADIENT_ENA				(1<<28)
+#define		LINEAR_GRADIENT_ENA				(1<<29)
+#define		POSITION_SAMPLE					(1<<30)
+#define		BARYC_AT_SAMPLE_ENA				(1<<31)
+
+#define	SQ_CONFIG					0x8C00
+#define		VC_ENABLE					(1 << 0)
+#define		EXPORT_SRC_C					(1 << 1)
+#define		CS_PRIO(x)					((x) << 18)
+#define		LS_PRIO(x)					((x) << 20)
+#define		HS_PRIO(x)					((x) << 22)
+#define		PS_PRIO(x)					((x) << 24)
+#define		VS_PRIO(x)					((x) << 26)
+#define		GS_PRIO(x)					((x) << 28)
+#define		ES_PRIO(x)					((x) << 30)
+#define	SQ_GPR_RESOURCE_MGMT_1				0x8C04
+#define		NUM_PS_GPRS(x)					((x) << 0)
+#define		NUM_VS_GPRS(x)					((x) << 16)
+#define		NUM_CLAUSE_TEMP_GPRS(x)				((x) << 28)
+#define	SQ_GPR_RESOURCE_MGMT_2				0x8C08
+#define		NUM_GS_GPRS(x)					((x) << 0)
+#define		NUM_ES_GPRS(x)					((x) << 16)
+#define	SQ_GPR_RESOURCE_MGMT_3				0x8C0C
+#define		NUM_HS_GPRS(x)					((x) << 0)
+#define		NUM_LS_GPRS(x)					((x) << 16)
+#define	SQ_THREAD_RESOURCE_MGMT				0x8C18
+#define		NUM_PS_THREADS(x)				((x) << 0)
+#define		NUM_VS_THREADS(x)				((x) << 8)
+#define		NUM_GS_THREADS(x)				((x) << 16)
+#define		NUM_ES_THREADS(x)				((x) << 24)
+#define	SQ_THREAD_RESOURCE_MGMT_2			0x8C1C
+#define		NUM_HS_THREADS(x)				((x) << 0)
+#define		NUM_LS_THREADS(x)				((x) << 8)
+#define	SQ_STACK_RESOURCE_MGMT_1			0x8C20
+#define		NUM_PS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_VS_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_STACK_RESOURCE_MGMT_2			0x8C24
+#define		NUM_GS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_ES_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_STACK_RESOURCE_MGMT_3			0x8C28
+#define		NUM_HS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_LS_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_DYN_GPR_CNTL_PS_FLUSH_REQ    		0x8D8C
+#define	SQ_LDS_RESOURCE_MGMT    			0x8E2C
+
+#define	SQ_MS_FIFO_SIZES				0x8CF0
+#define		CACHE_FIFO_SIZE(x)				((x) << 0)
+#define		FETCH_FIFO_HIWATER(x)				((x) << 8)
+#define		DONE_FIFO_HIWATER(x)				((x) << 16)
+#define		ALU_UPDATE_FIFO_HIWATER(x)			((x) << 24)
+
+#define	SX_DEBUG_1					0x9058
+#define		ENABLE_NEW_SMX_ADDRESS				(1 << 16)
+#define	SX_EXPORT_BUFFER_SIZES				0x900C
+#define		COLOR_BUFFER_SIZE(x)				((x) << 0)
+#define		POSITION_BUFFER_SIZE(x)				((x) << 8)
+#define		SMX_BUFFER_SIZE(x)				((x) << 16)
+#define	SX_MISC						0x28350
+
+#define CB_PERF_CTR0_SEL_0				0x9A20
+#define CB_PERF_CTR0_SEL_1				0x9A24
+#define CB_PERF_CTR1_SEL_0				0x9A28
+#define CB_PERF_CTR1_SEL_1				0x9A2C
+#define CB_PERF_CTR2_SEL_0				0x9A30
+#define CB_PERF_CTR2_SEL_1				0x9A34
+#define CB_PERF_CTR3_SEL_0				0x9A38
+#define CB_PERF_CTR3_SEL_1				0x9A3C
+
+#define	TA_CNTL_AUX					0x9508
+#define		DISABLE_CUBE_WRAP				(1 << 0)
+#define		DISABLE_CUBE_ANISO				(1 << 1)
+#define		SYNC_GRADIENT					(1 << 24)
+#define		SYNC_WALKER					(1 << 25)
+#define		SYNC_ALIGNER					(1 << 26)
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x) << 0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+#define	VGT_NUM_INSTANCES				0x8974
+#define	VGT_OUT_DEALLOC_CNTL				0x28C5C
+#define		DEALLOC_DIST_MASK				0x0000007F
+#define	VGT_VERTEX_REUSE_BLOCK_CNTL			0x28C58
+#define		VTX_REUSE_DEPTH_MASK				0x000000FF
+
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define VM_CONTEXT1_CNTL				0x1414
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153C
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
+#define VM_CONTEXT0_REQUEST_RESPONSE			0x1470
+#define		REQUEST_TYPE(x)					(((x) & 0xf) << 0)
+#define		RESPONSE_TYPE_MASK				0x000000F0
+#define		RESPONSE_TYPE_SHIFT				4
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 14)
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT(x)					((x) << 0)
+#define		CACHE_UPDATE_MODE(x)				((x) << 6)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+
+#define	WAIT_UNTIL					0x8040
+
+#define	SRBM_STATUS				        0x0E50
+#define	SRBM_SOFT_RESET				        0x0E60
+#define		SRBM_SOFT_RESET_ALL_MASK    	       	0x00FEEFA6
+#define		SOFT_RESET_BIF				(1 << 1)
+#define		SOFT_RESET_CG				(1 << 2)
+#define		SOFT_RESET_DC				(1 << 5)
+#define		SOFT_RESET_GRBM				(1 << 8)
+#define		SOFT_RESET_HDP				(1 << 9)
+#define		SOFT_RESET_IH				(1 << 10)
+#define		SOFT_RESET_MC				(1 << 11)
+#define		SOFT_RESET_RLC				(1 << 13)
+#define		SOFT_RESET_ROM				(1 << 14)
+#define		SOFT_RESET_SEM				(1 << 15)
+#define		SOFT_RESET_VMC				(1 << 17)
+#define		SOFT_RESET_TST				(1 << 21)
+#define		SOFT_RESET_REGBB		       	(1 << 22)
+#define		SOFT_RESET_ORB				(1 << 23)
+
+#define IH_RB_CNTL                                        0x3e00
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
+#define IH_RB_BASE                                        0x3e04
+#define IH_RB_RPTR                                        0x3e08
+#define IH_RB_WPTR                                        0x3e0c
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0x3e10
+#define IH_RB_WPTR_ADDR_LO                                0x3e14
+#define IH_CNTL                                           0x3e18
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 2)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
+
+#define CP_INT_CNTL                                     0xc124
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define SCRATCH_INT_ENABLE                       (1 << 25)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+#       define IB2_INT_ENABLE                           (1 << 29)
+#       define IB1_INT_ENABLE                           (1 << 30)
+#       define RB_INT_ENABLE                            (1 << 31)
+#define CP_INT_STATUS                                   0xc128
+#       define SCRATCH_INT_STAT                         (1 << 25)
+#       define TIME_STAMP_INT_STAT                      (1 << 26)
+#       define IB2_INT_STAT                             (1 << 29)
+#       define IB1_INT_STAT                             (1 << 30)
+#       define RB_INT_STAT                              (1 << 31)
+
+#define GRBM_INT_CNTL                                   0x8060
+#       define RDERR_INT_ENABLE                         (1 << 0)
+#       define GUI_IDLE_INT_ENABLE                      (1 << 19)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT                         0x6e98
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS                                    0x6bb8
+#       define VLINE_OCCURRED                           (1 << 0)
+#       define VLINE_ACK                                (1 << 4)
+#       define VLINE_STAT                               (1 << 12)
+#       define VLINE_INTERRUPT                          (1 << 16)
+#       define VLINE_INTERRUPT_TYPE                     (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS                                   0x6bbc
+#       define VBLANK_OCCURRED                          (1 << 0)
+#       define VBLANK_ACK                               (1 << 4)
+#       define VBLANK_STAT                              (1 << 12)
+#       define VBLANK_INTERRUPT                         (1 << 16)
+#       define VBLANK_INTERRUPT_TYPE                    (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK                                        0x6b40
+#       define VBLANK_INT_MASK                          (1 << 0)
+#       define VLINE_INT_MASK                           (1 << 4)
+
+#define DISP_INTERRUPT_STATUS                           0x60f4
+#       define LB_D1_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D1_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD1_INTERRUPT                        (1 << 17)
+#       define DC_HPD1_RX_INTERRUPT                     (1 << 18)
+#       define DACA_AUTODETECT_INTERRUPT                (1 << 22)
+#       define DACB_AUTODETECT_INTERRUPT                (1 << 23)
+#       define DC_I2C_SW_DONE_INTERRUPT                 (1 << 24)
+#       define DC_I2C_HW_DONE_INTERRUPT                 (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE                  0x60f8
+#       define LB_D2_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D2_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD2_INTERRUPT                        (1 << 17)
+#       define DC_HPD2_RX_INTERRUPT                     (1 << 18)
+#       define DISP_TIMER_INTERRUPT                     (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2                 0x60fc
+#       define LB_D3_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D3_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD3_INTERRUPT                        (1 << 17)
+#       define DC_HPD3_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3                 0x6100
+#       define LB_D4_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D4_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD4_INTERRUPT                        (1 << 17)
+#       define DC_HPD4_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4                 0x614c
+#       define LB_D5_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D5_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD5_INTERRUPT                        (1 << 17)
+#       define DC_HPD5_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x6050
+#       define LB_D6_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D6_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD6_INTERRUPT                        (1 << 17)
+#       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x6858
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define	GRPH_INT_CONTROL			        0x685c
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
+
+#define	DACA_AUTODETECT_INT_CONTROL			0x66c8
+#define	DACB_AUTODETECT_INT_CONTROL			0x67c8
+
+#define DC_HPD1_INT_STATUS                              0x601c
+#define DC_HPD2_INT_STATUS                              0x6028
+#define DC_HPD3_INT_STATUS                              0x6034
+#define DC_HPD4_INT_STATUS                              0x6040
+#define DC_HPD5_INT_STATUS                              0x604c
+#define DC_HPD6_INT_STATUS                              0x6058
+#       define DC_HPDx_INT_STATUS                       (1 << 0)
+#       define DC_HPDx_SENSE                            (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                    (1 << 8)
+
+#define DC_HPD1_INT_CONTROL                             0x6020
+#define DC_HPD2_INT_CONTROL                             0x602c
+#define DC_HPD3_INT_CONTROL                             0x6038
+#define DC_HPD4_INT_CONTROL                             0x6044
+#define DC_HPD5_INT_CONTROL                             0x6050
+#define DC_HPD6_INT_CONTROL                             0x605c
+#       define DC_HPDx_INT_ACK                          (1 << 0)
+#       define DC_HPDx_INT_POLARITY                     (1 << 8)
+#       define DC_HPDx_INT_EN                           (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                       (1 << 20)
+#       define DC_HPDx_RX_INT_EN                        (1 << 24)
+
+#define DC_HPD1_CONTROL                                   0x6024
+#define DC_HPD2_CONTROL                                   0x6030
+#define DC_HPD3_CONTROL                                   0x603c
+#define DC_HPD4_CONTROL                                   0x6048
+#define DC_HPD5_CONTROL                                   0x6054
+#define DC_HPD6_CONTROL                                   0x6060
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+#       define DC_HPDx_EN                                 (1 << 28)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index cf60c0b..cc004b0 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -37,6 +37,7 @@
 #include "rs100d.h"
 #include "rv200d.h"
 #include "rv250d.h"
+#include "atom.h"
 
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
@@ -67,6 +68,264 @@
  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
  */
 
+void r100_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+	int i;
+	rdev->pm.dynpm_can_upclock = true;
+	rdev->pm.dynpm_can_downclock = true;
+
+	switch (rdev->pm.dynpm_planned_action) {
+	case DYNPM_ACTION_MINIMUM:
+		rdev->pm.requested_power_state_index = 0;
+		rdev->pm.dynpm_can_downclock = false;
+		break;
+	case DYNPM_ACTION_DOWNCLOCK:
+		if (rdev->pm.current_power_state_index == 0) {
+			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+			rdev->pm.dynpm_can_downclock = false;
+		} else {
+			if (rdev->pm.active_crtc_count > 1) {
+				for (i = 0; i < rdev->pm.num_power_states; i++) {
+					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+						continue;
+					else if (i >= rdev->pm.current_power_state_index) {
+						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+						break;
+					} else {
+						rdev->pm.requested_power_state_index = i;
+						break;
+					}
+				}
+			} else
+				rdev->pm.requested_power_state_index =
+					rdev->pm.current_power_state_index - 1;
+		}
+		/* don't use the power state if crtcs are active and no display flag is set */
+		if ((rdev->pm.active_crtc_count > 0) &&
+		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
+		     RADEON_PM_MODE_NO_DISPLAY)) {
+			rdev->pm.requested_power_state_index++;
+		}
+		break;
+	case DYNPM_ACTION_UPCLOCK:
+		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+			rdev->pm.dynpm_can_upclock = false;
+		} else {
+			if (rdev->pm.active_crtc_count > 1) {
+				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+						continue;
+					else if (i <= rdev->pm.current_power_state_index) {
+						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+						break;
+					} else {
+						rdev->pm.requested_power_state_index = i;
+						break;
+					}
+				}
+			} else
+				rdev->pm.requested_power_state_index =
+					rdev->pm.current_power_state_index + 1;
+		}
+		break;
+	case DYNPM_ACTION_DEFAULT:
+		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+		rdev->pm.dynpm_can_upclock = false;
+		break;
+	case DYNPM_ACTION_NONE:
+	default:
+		DRM_ERROR("Requested mode for not defined action\n");
+		return;
+	}
+	/* only one clock mode per power state */
+	rdev->pm.requested_clock_mode_index = 0;
+
+	DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  pcie_lanes);
+}
+
+void r100_pm_init_profile(struct radeon_device *rdev)
+{
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+	/* low sh */
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+	/* high sh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+	/* low mh */
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+	/* high mh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
+void r100_pm_misc(struct radeon_device *rdev)
+{
+	int requested_index = rdev->pm.requested_power_state_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
+
+	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp |= voltage->gpio.mask;
+			else
+				tmp &= ~(voltage->gpio.mask);
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		} else {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp &= ~voltage->gpio.mask;
+			else
+				tmp |= voltage->gpio.mask;
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		}
+	}
+
+	sclk_cntl = RREG32_PLL(SCLK_CNTL);
+	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
+	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
+	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
+	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
+		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
+			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
+		else
+			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
+		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
+			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
+		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
+			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
+	} else
+		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
+
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
+		if (voltage->delay) {
+			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
+			switch (voltage->delay) {
+			case 33:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
+				break;
+			case 66:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
+				break;
+			case 99:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
+				break;
+			case 132:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
+				break;
+			}
+		} else
+			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
+	} else
+		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
+
+	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+		sclk_cntl &= ~FORCE_HDP;
+	else
+		sclk_cntl |= FORCE_HDP;
+
+	WREG32_PLL(SCLK_CNTL, sclk_cntl);
+	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
+	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
+
+	/* set pcie lanes */
+	if ((rdev->flags & RADEON_IS_PCIE) &&
+	    !(rdev->flags & RADEON_IS_IGP) &&
+	    rdev->asic->set_pcie_lanes &&
+	    (ps->pcie_lanes !=
+	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+		radeon_set_pcie_lanes(rdev,
+				      ps->pcie_lanes);
+		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+	}
+}
+
+void r100_pm_prepare(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* disable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			if (radeon_crtc->crtc_id) {
+				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+			} else {
+				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+			}
+		}
+	}
+}
+
+void r100_pm_finish(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* enable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			if (radeon_crtc->crtc_id) {
+				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+			} else {
+				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+			}
+		}
+	}
+}
+
+bool r100_gui_idle(struct radeon_device *rdev)
+{
+	if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
+		return false;
+	else
+		return true;
+}
+
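The block above, together with the RADEON_GUI_IDLE_MASK handling added to r100_irq_set()/r100_irq_ack()/r100_irq_process() further down, lets the power-management code either poll r100_gui_idle() or sleep on rdev->irq.idle_queue until the drawing engine drains. A minimal sketch of how a caller might use the interrupt-driven path is shown here; the helper name, the radeon_irq_set() wrapper and the 100 ms timeout are illustrative assumptions, not part of this patch.

/* Illustrative only: wait for the GUI engine to go idle using the
 * gui_idle interrupt plumbing added above. */
static void example_wait_for_gui_idle(struct radeon_device *rdev)
{
	rdev->pm.gui_idle = false;
	rdev->irq.gui_idle = true;
	radeon_irq_set(rdev);		/* unmasks RADEON_GUI_IDLE_MASK on r100 */

	/* the ISR sets pm.gui_idle and wakes idle_queue when the GUI
	 * engine reports idle; don't wait forever, the status bit is
	 * known to be unreliable */
	wait_event_timeout(rdev->irq.idle_queue, rdev->pm.gui_idle,
			   msecs_to_jiffies(100));

	rdev->irq.gui_idle = false;
	radeon_irq_set(rdev);
}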
 /* hpd for digital panel detect/disconnect */
 bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
 {
@@ -254,6 +513,9 @@
 	if (rdev->irq.sw_int) {
 		tmp |= RADEON_SW_INT_ENABLE;
 	}
+	if (rdev->irq.gui_idle) {
+		tmp |= RADEON_GUI_IDLE_MASK;
+	}
 	if (rdev->irq.crtc_vblank_int[0]) {
 		tmp |= RADEON_CRTC_VBLANK_MASK;
 	}
@@ -288,6 +550,12 @@
 		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
 		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
 
+	/* the interrupt works, but the status bit is permanently asserted */
+	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
+		if (!rdev->irq.gui_idle_acked)
+			irq_mask |= RADEON_GUI_IDLE_STAT;
+	}
+
 	if (irqs) {
 		WREG32(RADEON_GEN_INT_STATUS, irqs);
 	}
@@ -299,6 +567,9 @@
 	uint32_t status, msi_rearm;
 	bool queue_hotplug = false;
 
+	/* reset gui idle ack.  the status bit is broken */
+	rdev->irq.gui_idle_acked = false;
+
 	status = r100_irq_ack(rdev);
 	if (!status) {
 		return IRQ_NONE;
@@ -311,6 +582,12 @@
 		if (status & RADEON_SW_INT_TEST) {
 			radeon_fence_process(rdev);
 		}
+		/* gui idle interrupt */
+		if (status & RADEON_GUI_IDLE_STAT) {
+			rdev->irq.gui_idle_acked = true;
+			rdev->pm.gui_idle = true;
+			wake_up(&rdev->irq.idle_queue);
+		}
 		/* Vertical blank interrupts */
 		if (status & RADEON_CRTC_VBLANK_STAT) {
 			drm_handle_vblank(rdev->ddev, 0);
@@ -332,6 +609,8 @@
 		}
 		status = r100_irq_ack(rdev);
 	}
+	/* reset gui idle ack.  the status bit is broken */
+	rdev->irq.gui_idle_acked = false;
 	if (queue_hotplug)
 		queue_work(rdev->wq, &rdev->hotplug_work);
 	if (rdev->msi_enabled) {
@@ -663,26 +942,6 @@
 	if (r100_debugfs_cp_init(rdev)) {
 		DRM_ERROR("Failed to register debugfs file for CP !\n");
 	}
-	/* Reset CP */
-	tmp = RREG32(RADEON_CP_CSQ_STAT);
-	if ((tmp & (1 << 31))) {
-		DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
-		WREG32(RADEON_CP_CSQ_MODE, 0);
-		WREG32(RADEON_CP_CSQ_CNTL, 0);
-		WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
-		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
-		mdelay(2);
-		WREG32(RADEON_RBBM_SOFT_RESET, 0);
-		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
-		mdelay(2);
-		tmp = RREG32(RADEON_CP_CSQ_STAT);
-		if ((tmp & (1 << 31))) {
-			DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
-		}
-	} else {
-		DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
-	}
-
 	if (!rdev->me_fw) {
 		r = r100_cp_init_microcode(rdev);
 		if (r) {
@@ -787,39 +1046,6 @@
 	}
 }
 
-int r100_cp_reset(struct radeon_device *rdev)
-{
-	uint32_t tmp;
-	bool reinit_cp;
-	int i;
-
-	reinit_cp = rdev->cp.ready;
-	rdev->cp.ready = false;
-	WREG32(RADEON_CP_CSQ_MODE, 0);
-	WREG32(RADEON_CP_CSQ_CNTL, 0);
-	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
-	(void)RREG32(RADEON_RBBM_SOFT_RESET);
-	udelay(200);
-	WREG32(RADEON_RBBM_SOFT_RESET, 0);
-	/* Wait to prevent race in RBBM_STATUS */
-	mdelay(1);
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(RADEON_RBBM_STATUS);
-		if (!(tmp & (1 << 16))) {
-			DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
-				 tmp);
-			if (reinit_cp) {
-				return r100_cp_init(rdev, rdev->cp.ring_size);
-			}
-			return 0;
-		}
-		DRM_UDELAY(1);
-	}
-	tmp = RREG32(RADEON_RBBM_STATUS);
-	DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
-	return -1;
-}
-
 void r100_cp_commit(struct radeon_device *rdev)
 {
 	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
@@ -1733,76 +1959,163 @@
 	return -1;
 }
 
-void r100_gpu_init(struct radeon_device *rdev)
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
 {
-	/* TODO: anythings to do here ? pipes ? */
-	r100_hdp_reset(rdev);
+	lockup->last_cp_rptr = cp->rptr;
+	lockup->last_jiffies = jiffies;
 }
 
-void r100_hdp_reset(struct radeon_device *rdev)
+/**
+ * r100_gpu_cp_is_lockup() - check whether the CP is locked up by tracking its progress
+ * @rdev:	radeon device structure
+ * @lockup:	r100_gpu_lockup structure holding CP lockup tracking information
+ * @cp:		radeon_cp structure holding CP information
+ *
+ * We don't need to initialize the lockup tracking information, as we will either
+ * see the CP rptr move to a different value or hit a jiffies wrap-around, both of
+ * which force re-initialization of the lockup tracking information.
+ *
+ * A possible false positive is when we get called after a while and last_cp_rptr
+ * happens to equal the current CP rptr; unlikely, but it can happen. To avoid
+ * this, if the time elapsed since the last call is larger than 2 seconds we
+ * return false and update the tracking information. Because of this, the caller
+ * must call r100_gpu_cp_is_lockup several times within 2 seconds for a lockup to
+ * be reported; the fencing code should be cautious about that.
+ *
+ * The caller should write to the ring to force the CP to do something, so we
+ * don't get a false positive when the CP simply has nothing to do.
+ *
+ **/
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
 {
-	uint32_t tmp;
+	unsigned long cjiffies, elapsed;
 
-	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
-	tmp |= (7 << 28);
-	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
-	(void)RREG32(RADEON_HOST_PATH_CNTL);
-	udelay(200);
-	WREG32(RADEON_RBBM_SOFT_RESET, 0);
-	WREG32(RADEON_HOST_PATH_CNTL, tmp);
-	(void)RREG32(RADEON_HOST_PATH_CNTL);
+	cjiffies = jiffies;
+	if (!time_after(cjiffies, lockup->last_jiffies)) {
+		/* likely a wrap around */
+		lockup->last_cp_rptr = cp->rptr;
+		lockup->last_jiffies = jiffies;
+		return false;
+	}
+	if (cp->rptr != lockup->last_cp_rptr) {
+		/* CP is still working no lockup */
+		lockup->last_cp_rptr = cp->rptr;
+		lockup->last_jiffies = jiffies;
+		return false;
+	}
+	elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
+	if (elapsed >= 3000) {
+		/* very likely the improbable case where the current
+		 * rptr equals an rptr recorded a while ago; this is
+		 * more likely a false positive, so update the tracking
+		 * information, which forces us to be called again at a
+		 * later point
+		 */
+		lockup->last_cp_rptr = cp->rptr;
+		lockup->last_jiffies = jiffies;
+		return false;
+	}
+	if (elapsed >= 1000) {
+		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+		return true;
+	}
+	/* give a chance to the GPU ... */
+	return false;
 }
 
-int r100_rb2d_reset(struct radeon_device *rdev)
+bool r100_gpu_is_lockup(struct radeon_device *rdev)
 {
-	uint32_t tmp;
-	int i;
+	u32 rbbm_status;
+	int r;
 
-	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
-	(void)RREG32(RADEON_RBBM_SOFT_RESET);
-	udelay(200);
-	WREG32(RADEON_RBBM_SOFT_RESET, 0);
-	/* Wait to prevent race in RBBM_STATUS */
+	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+		r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
+		return false;
+	}
+	/* force CP activities */
+	r = radeon_ring_lock(rdev, 2);
+	if (!r) {
+		/* PACKET2 NOP */
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_unlock_commit(rdev);
+	}
+	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
+}
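r100_gpu_is_lockup() forces CP activity by emitting two PACKET2 NOPs (0x80000000 is a type-2 packet header) before re-reading the ring read pointer, so an idle-but-healthy CP is not mistaken for a hung one. A hedged sketch of how a fence-timeout path might consume this check follows; it assumes the usual radeon_gpu_is_lockup()/radeon_asic_reset() asic-dispatch wrappers and is not code from this patch.

/* Illustrative only: decide between "keep waiting" and "reset the ASIC"
 * once a fence wait has timed out. */
static int example_handle_fence_timeout(struct radeon_device *rdev)
{
	if (!radeon_gpu_is_lockup(rdev))
		return -EAGAIN;		/* CP rptr still moving, just keep waiting */

	/* the CP really is stuck: go through the new soft-reset path */
	return radeon_asic_reset(rdev);
}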
+
+void r100_bm_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* disable bus mastering */
+	tmp = RREG32(R_000030_BUS_CNTL);
+	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
 	mdelay(1);
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(RADEON_RBBM_STATUS);
-		if (!(tmp & (1 << 26))) {
-			DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
-				 tmp);
-			return 0;
-		}
-		DRM_UDELAY(1);
-	}
-	tmp = RREG32(RADEON_RBBM_STATUS);
-	DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
-	return -1;
+	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
+	mdelay(1);
+	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+	tmp = RREG32(RADEON_BUS_CNTL);
+	mdelay(1);
+	pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+	mdelay(1);
 }
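The config-space accesses at the end of r100_bm_disable() clear bit 2 of the standard PCI command register (offset 0x4), i.e. Bus Master Enable, which is what the 0xFFFB mask encodes. With the symbolic names from <linux/pci_regs.h> the same operation reads roughly as follows (illustrative sketch only):

/* Equivalent of the raw 0x4 / 0xFFFB manipulation above, spelled out
 * with the standard PCI register definitions. */
static void example_pci_bm_off(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;	/* PCI_COMMAND_MASTER == 0x0004 -> mask 0xFFFB */
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
}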
 
-int r100_gpu_reset(struct radeon_device *rdev)
+int r100_asic_reset(struct radeon_device *rdev)
 {
-	uint32_t status;
+	struct r100_mc_save save;
+	u32 status, tmp;
 
-	/* reset order likely matter */
-	status = RREG32(RADEON_RBBM_STATUS);
-	/* reset HDP */
-	r100_hdp_reset(rdev);
-	/* reset rb2d */
-	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
-		r100_rb2d_reset(rdev);
+	r100_mc_stop(rdev, &save);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(status)) {
+		return 0;
 	}
-	/* TODO: reset 3D engine */
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* stop CP */
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	WREG32(RADEON_CP_RB_WPTR, 0);
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	/* save PCI state */
+	pci_save_state(rdev->pdev);
+	/* disable bus mastering */
+	r100_bm_disable(rdev);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
+					S_0000F0_SOFT_RESET_RE(1) |
+					S_0000F0_SOFT_RESET_PP(1) |
+					S_0000F0_SOFT_RESET_RB(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
 	/* reset CP */
-	status = RREG32(RADEON_RBBM_STATUS);
-	if (status & (1 << 16)) {
-		r100_cp_reset(rdev);
-	}
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* restore PCI & busmastering */
+	pci_restore_state(rdev->pdev);
+	r100_enable_bm(rdev);
 	/* Check if GPU is idle */
-	status = RREG32(RADEON_RBBM_STATUS);
-	if (status & RADEON_RBBM_ACTIVE) {
-		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+	if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
+		G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
+		dev_err(rdev->dev, "failed to reset GPU\n");
+		rdev->gpu_lockup = true;
 		return -1;
 	}
-	DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+	r100_mc_resume(rdev, &save);
+	dev_info(rdev->dev, "GPU reset succeeded\n");
 	return 0;
 }
 
@@ -2002,11 +2315,6 @@
 		else
 			rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
 	}
-	/* FIXME remove this once we support unmappable VRAM */
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-		rdev->mc.real_vram_size = rdev->mc.aper_size;
-	}
 }
 
 void r100_vga_set_state(struct radeon_device *rdev, bool state)
@@ -2335,53 +2643,53 @@
 	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
 	uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
 	fixed20_12 memtcas_ff[8] = {
-		fixed_init(1),
-		fixed_init(2),
-		fixed_init(3),
-		fixed_init(0),
-		fixed_init_half(1),
-		fixed_init_half(2),
-		fixed_init(0),
+		dfixed_init(1),
+		dfixed_init(2),
+		dfixed_init(3),
+		dfixed_init(0),
+		dfixed_init_half(1),
+		dfixed_init_half(2),
+		dfixed_init(0),
 	};
 	fixed20_12 memtcas_rs480_ff[8] = {
-		fixed_init(0),
-		fixed_init(1),
-		fixed_init(2),
-		fixed_init(3),
-		fixed_init(0),
-		fixed_init_half(1),
-		fixed_init_half(2),
-		fixed_init_half(3),
+		dfixed_init(0),
+		dfixed_init(1),
+		dfixed_init(2),
+		dfixed_init(3),
+		dfixed_init(0),
+		dfixed_init_half(1),
+		dfixed_init_half(2),
+		dfixed_init_half(3),
 	};
 	fixed20_12 memtcas2_ff[8] = {
-		fixed_init(0),
-		fixed_init(1),
-		fixed_init(2),
-		fixed_init(3),
-		fixed_init(4),
-		fixed_init(5),
-		fixed_init(6),
-		fixed_init(7),
+		dfixed_init(0),
+		dfixed_init(1),
+		dfixed_init(2),
+		dfixed_init(3),
+		dfixed_init(4),
+		dfixed_init(5),
+		dfixed_init(6),
+		dfixed_init(7),
 	};
 	fixed20_12 memtrbs[8] = {
-		fixed_init(1),
-		fixed_init_half(1),
-		fixed_init(2),
-		fixed_init_half(2),
-		fixed_init(3),
-		fixed_init_half(3),
-		fixed_init(4),
-		fixed_init_half(4)
+		dfixed_init(1),
+		dfixed_init_half(1),
+		dfixed_init(2),
+		dfixed_init_half(2),
+		dfixed_init(3),
+		dfixed_init_half(3),
+		dfixed_init(4),
+		dfixed_init_half(4)
 	};
 	fixed20_12 memtrbs_r4xx[8] = {
-		fixed_init(4),
-		fixed_init(5),
-		fixed_init(6),
-		fixed_init(7),
-		fixed_init(8),
-		fixed_init(9),
-		fixed_init(10),
-		fixed_init(11)
+		dfixed_init(4),
+		dfixed_init(5),
+		dfixed_init(6),
+		dfixed_init(7),
+		dfixed_init(8),
+		dfixed_init(9),
+		dfixed_init(10),
+		dfixed_init(11)
 	};
 	fixed20_12 min_mem_eff;
 	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
@@ -2412,7 +2720,7 @@
 		}
 	}
 
-	min_mem_eff.full = rfixed_const_8(0);
+	min_mem_eff.full = dfixed_const_8(0);
 	/* get modes */
 	if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
 		uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
@@ -2433,28 +2741,28 @@
 	mclk_ff = rdev->pm.mclk;
 
 	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
-	temp_ff.full = rfixed_const(temp);
-	mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
+	temp_ff.full = dfixed_const(temp);
+	mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
 
 	pix_clk.full = 0;
 	pix_clk2.full = 0;
 	peak_disp_bw.full = 0;
 	if (mode1) {
-		temp_ff.full = rfixed_const(1000);
-		pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
-		pix_clk.full = rfixed_div(pix_clk, temp_ff);
-		temp_ff.full = rfixed_const(pixel_bytes1);
-		peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
+		temp_ff.full = dfixed_const(1000);
+		pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
+		pix_clk.full = dfixed_div(pix_clk, temp_ff);
+		temp_ff.full = dfixed_const(pixel_bytes1);
+		peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
 	}
 	if (mode2) {
-		temp_ff.full = rfixed_const(1000);
-		pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
-		pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
-		temp_ff.full = rfixed_const(pixel_bytes2);
-		peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
+		temp_ff.full = dfixed_const(1000);
+		pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
+		pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
+		temp_ff.full = dfixed_const(pixel_bytes2);
+		peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
 	}
 
-	mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
+	mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
 	if (peak_disp_bw.full >= mem_bw.full) {
 		DRM_ERROR("You may not have enough display bandwidth for current mode\n"
 			  "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
@@ -2496,9 +2804,9 @@
 		mem_tras = ((temp >> 12) & 0xf) + 4;
 	}
 	/* convert to FF */
-	trcd_ff.full = rfixed_const(mem_trcd);
-	trp_ff.full = rfixed_const(mem_trp);
-	tras_ff.full = rfixed_const(mem_tras);
+	trcd_ff.full = dfixed_const(mem_trcd);
+	trp_ff.full = dfixed_const(mem_trp);
+	tras_ff.full = dfixed_const(mem_tras);
 
 	/* Get values from the MEM_SDRAM_MODE_REG register...converting its */
 	temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
@@ -2516,7 +2824,7 @@
 		/* extra cas latency stored in bits 23-25 0-4 clocks */
 		data = (temp >> 23) & 0x7;
 		if (data < 5)
-			tcas_ff.full += rfixed_const(data);
+			tcas_ff.full += dfixed_const(data);
 	}
 
 	if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
@@ -2553,72 +2861,72 @@
 
 	if (rdev->flags & RADEON_IS_AGP) {
 		fixed20_12 agpmode_ff;
-		agpmode_ff.full = rfixed_const(radeon_agpmode);
-		temp_ff.full = rfixed_const_666(16);
-		sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
+		agpmode_ff.full = dfixed_const(radeon_agpmode);
+		temp_ff.full = dfixed_const_666(16);
+		sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
 	}
 	/* TODO PCIE lanes may affect this - agpmode == 16?? */
 
 	if (ASIC_IS_R300(rdev)) {
-		sclk_delay_ff.full = rfixed_const(250);
+		sclk_delay_ff.full = dfixed_const(250);
 	} else {
 		if ((rdev->family == CHIP_RV100) ||
 		    rdev->flags & RADEON_IS_IGP) {
 			if (rdev->mc.vram_is_ddr)
-				sclk_delay_ff.full = rfixed_const(41);
+				sclk_delay_ff.full = dfixed_const(41);
 			else
-				sclk_delay_ff.full = rfixed_const(33);
+				sclk_delay_ff.full = dfixed_const(33);
 		} else {
 			if (rdev->mc.vram_width == 128)
-				sclk_delay_ff.full = rfixed_const(57);
+				sclk_delay_ff.full = dfixed_const(57);
 			else
-				sclk_delay_ff.full = rfixed_const(41);
+				sclk_delay_ff.full = dfixed_const(41);
 		}
 	}
 
-	mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
+	mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
 
 	if (rdev->mc.vram_is_ddr) {
 		if (rdev->mc.vram_width == 32) {
-			k1.full = rfixed_const(40);
+			k1.full = dfixed_const(40);
 			c  = 3;
 		} else {
-			k1.full = rfixed_const(20);
+			k1.full = dfixed_const(20);
 			c  = 1;
 		}
 	} else {
-		k1.full = rfixed_const(40);
+		k1.full = dfixed_const(40);
 		c  = 3;
 	}
 
-	temp_ff.full = rfixed_const(2);
-	mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
-	temp_ff.full = rfixed_const(c);
-	mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
-	temp_ff.full = rfixed_const(4);
-	mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
-	mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
+	temp_ff.full = dfixed_const(2);
+	mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
+	temp_ff.full = dfixed_const(c);
+	mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
+	temp_ff.full = dfixed_const(4);
+	mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
+	mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
 	mc_latency_mclk.full += k1.full;
 
-	mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
-	mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
+	mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
+	mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
 
 	/*
 	  HW cursor time assuming worst case of full size colour cursor.
 	*/
-	temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
+	temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
 	temp_ff.full += trcd_ff.full;
 	if (temp_ff.full < tras_ff.full)
 		temp_ff.full = tras_ff.full;
-	cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
+	cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
 
-	temp_ff.full = rfixed_const(cur_size);
-	cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
+	temp_ff.full = dfixed_const(cur_size);
+	cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
 	/*
 	  Find the total latency for the display data.
 	*/
-	disp_latency_overhead.full = rfixed_const(8);
-	disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
+	disp_latency_overhead.full = dfixed_const(8);
+	disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
 	mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
 	mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
 
@@ -2646,16 +2954,16 @@
 		/*
 		  Find the drain rate of the display buffer.
 		*/
-		temp_ff.full = rfixed_const((16/pixel_bytes1));
-		disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
+		temp_ff.full = dfixed_const((16/pixel_bytes1));
+		disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
 
 		/*
 		  Find the critical point of the display buffer.
 		*/
-		crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
-		crit_point_ff.full += rfixed_const_half(0);
+		crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
+		crit_point_ff.full += dfixed_const_half(0);
 
-		critical_point = rfixed_trunc(crit_point_ff);
+		critical_point = dfixed_trunc(crit_point_ff);
 
 		if (rdev->disp_priority == 2) {
 			critical_point = 0;
@@ -2726,8 +3034,8 @@
 		/*
 		  Find the drain rate of the display buffer.
 		*/
-		temp_ff.full = rfixed_const((16/pixel_bytes2));
-		disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
+		temp_ff.full = dfixed_const((16/pixel_bytes2));
+		disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
 
 		grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
 		grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
@@ -2748,8 +3056,8 @@
 			critical_point2 = 0;
 		else {
 			temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
-			temp_ff.full = rfixed_const(temp);
-			temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
+			temp_ff.full = dfixed_const(temp);
+			temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
 			if (sclk_ff.full < temp_ff.full)
 				temp_ff.full = sclk_ff.full;
 
@@ -2757,15 +3065,15 @@
 
 			if (mode1) {
 				temp_ff.full = read_return_rate.full - disp_drain_rate.full;
-				time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
+				time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
 			} else {
 				time_disp1_drop_priority.full = 0;
 			}
 			crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
-			crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
-			crit_point_ff.full += rfixed_const_half(0);
+			crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
+			crit_point_ff.full += dfixed_const_half(0);
 
-			critical_point2 = rfixed_trunc(crit_point_ff);
+			critical_point2 = dfixed_trunc(crit_point_ff);
 
 			if (rdev->disp_priority == 2) {
 				critical_point2 = 0;
@@ -3399,7 +3707,7 @@
 	/* Resume clock */
 	r100_clock_startup(rdev);
 	/* Initialize GPU configuration (# pipes, ...) */
-	r100_gpu_init(rdev);
+//	r100_gpu_init(rdev);
 	/* Initialize GART (initialize after TTM so we can allocate
 	 * memory through TTM but finalize after TTM) */
 	r100_enable_bm(rdev);
@@ -3436,7 +3744,7 @@
 	/* Resume clock before doing reset */
 	r100_clock_startup(rdev);
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
 			RREG32(R_0007C0_CP_STAT));
@@ -3462,7 +3770,6 @@
 
 void r100_fini(struct radeon_device *rdev)
 {
-	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -3505,7 +3812,7 @@
 			return r;
 	}
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev,
 			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
@@ -3518,8 +3825,6 @@
 	r100_errata(rdev);
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
-	/* Initialize power management */
-	radeon_pm_init(rdev);
 	/* initialize AGP */
 	if (rdev->flags & RADEON_IS_AGP) {
 		r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index df29a63..d016b16 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -74,6 +74,134 @@
 #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
 
 /* Registers */
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_SE(x)                    (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_SE(x)                    (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_SE                       0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define R_000030_BUS_CNTL                            0x000030
+#define   S_000030_BUS_DBL_RESYNC(x)                   (((x) & 0x1) << 0)
+#define   G_000030_BUS_DBL_RESYNC(x)                   (((x) >> 0) & 0x1)
+#define   C_000030_BUS_DBL_RESYNC                      0xFFFFFFFE
+#define   S_000030_BUS_MSTR_RESET(x)                   (((x) & 0x1) << 1)
+#define   G_000030_BUS_MSTR_RESET(x)                   (((x) >> 1) & 0x1)
+#define   C_000030_BUS_MSTR_RESET                      0xFFFFFFFD
+#define   S_000030_BUS_FLUSH_BUF(x)                    (((x) & 0x1) << 2)
+#define   G_000030_BUS_FLUSH_BUF(x)                    (((x) >> 2) & 0x1)
+#define   C_000030_BUS_FLUSH_BUF                       0xFFFFFFFB
+#define   S_000030_BUS_STOP_REQ_DIS(x)                 (((x) & 0x1) << 3)
+#define   G_000030_BUS_STOP_REQ_DIS(x)                 (((x) >> 3) & 0x1)
+#define   C_000030_BUS_STOP_REQ_DIS                    0xFFFFFFF7
+#define   S_000030_BUS_PM4_READ_COMBINE_EN(x)          (((x) & 0x1) << 4)
+#define   G_000030_BUS_PM4_READ_COMBINE_EN(x)          (((x) >> 4) & 0x1)
+#define   C_000030_BUS_PM4_READ_COMBINE_EN             0xFFFFFFEF
+#define   S_000030_BUS_WRT_COMBINE_EN(x)               (((x) & 0x1) << 5)
+#define   G_000030_BUS_WRT_COMBINE_EN(x)               (((x) >> 5) & 0x1)
+#define   C_000030_BUS_WRT_COMBINE_EN                  0xFFFFFFDF
+#define   S_000030_BUS_MASTER_DIS(x)                   (((x) & 0x1) << 6)
+#define   G_000030_BUS_MASTER_DIS(x)                   (((x) >> 6) & 0x1)
+#define   C_000030_BUS_MASTER_DIS                      0xFFFFFFBF
+#define   S_000030_BIOS_ROM_WRT_EN(x)                  (((x) & 0x1) << 7)
+#define   G_000030_BIOS_ROM_WRT_EN(x)                  (((x) >> 7) & 0x1)
+#define   C_000030_BIOS_ROM_WRT_EN                     0xFFFFFF7F
+#define   S_000030_BM_DAC_CRIPPLE(x)                   (((x) & 0x1) << 8)
+#define   G_000030_BM_DAC_CRIPPLE(x)                   (((x) >> 8) & 0x1)
+#define   C_000030_BM_DAC_CRIPPLE                      0xFFFFFEFF
+#define   S_000030_BUS_NON_PM4_READ_COMBINE_EN(x)      (((x) & 0x1) << 9)
+#define   G_000030_BUS_NON_PM4_READ_COMBINE_EN(x)      (((x) >> 9) & 0x1)
+#define   C_000030_BUS_NON_PM4_READ_COMBINE_EN         0xFFFFFDFF
+#define   S_000030_BUS_XFERD_DISCARD_EN(x)             (((x) & 0x1) << 10)
+#define   G_000030_BUS_XFERD_DISCARD_EN(x)             (((x) >> 10) & 0x1)
+#define   C_000030_BUS_XFERD_DISCARD_EN                0xFFFFFBFF
+#define   S_000030_BUS_SGL_READ_DISABLE(x)             (((x) & 0x1) << 11)
+#define   G_000030_BUS_SGL_READ_DISABLE(x)             (((x) >> 11) & 0x1)
+#define   C_000030_BUS_SGL_READ_DISABLE                0xFFFFF7FF
+#define   S_000030_BIOS_DIS_ROM(x)                     (((x) & 0x1) << 12)
+#define   G_000030_BIOS_DIS_ROM(x)                     (((x) >> 12) & 0x1)
+#define   C_000030_BIOS_DIS_ROM                        0xFFFFEFFF
+#define   S_000030_BUS_PCI_READ_RETRY_EN(x)            (((x) & 0x1) << 13)
+#define   G_000030_BUS_PCI_READ_RETRY_EN(x)            (((x) >> 13) & 0x1)
+#define   C_000030_BUS_PCI_READ_RETRY_EN               0xFFFFDFFF
+#define   S_000030_BUS_AGP_AD_STEPPING_EN(x)           (((x) & 0x1) << 14)
+#define   G_000030_BUS_AGP_AD_STEPPING_EN(x)           (((x) >> 14) & 0x1)
+#define   C_000030_BUS_AGP_AD_STEPPING_EN              0xFFFFBFFF
+#define   S_000030_BUS_PCI_WRT_RETRY_EN(x)             (((x) & 0x1) << 15)
+#define   G_000030_BUS_PCI_WRT_RETRY_EN(x)             (((x) >> 15) & 0x1)
+#define   C_000030_BUS_PCI_WRT_RETRY_EN                0xFFFF7FFF
+#define   S_000030_BUS_RETRY_WS(x)                     (((x) & 0xF) << 16)
+#define   G_000030_BUS_RETRY_WS(x)                     (((x) >> 16) & 0xF)
+#define   C_000030_BUS_RETRY_WS                        0xFFF0FFFF
+#define   S_000030_BUS_MSTR_RD_MULT(x)                 (((x) & 0x1) << 20)
+#define   G_000030_BUS_MSTR_RD_MULT(x)                 (((x) >> 20) & 0x1)
+#define   C_000030_BUS_MSTR_RD_MULT                    0xFFEFFFFF
+#define   S_000030_BUS_MSTR_RD_LINE(x)                 (((x) & 0x1) << 21)
+#define   G_000030_BUS_MSTR_RD_LINE(x)                 (((x) >> 21) & 0x1)
+#define   C_000030_BUS_MSTR_RD_LINE                    0xFFDFFFFF
+#define   S_000030_BUS_SUSPEND(x)                      (((x) & 0x1) << 22)
+#define   G_000030_BUS_SUSPEND(x)                      (((x) >> 22) & 0x1)
+#define   C_000030_BUS_SUSPEND                         0xFFBFFFFF
+#define   S_000030_LAT_16X(x)                          (((x) & 0x1) << 23)
+#define   G_000030_LAT_16X(x)                          (((x) >> 23) & 0x1)
+#define   C_000030_LAT_16X                             0xFF7FFFFF
+#define   S_000030_BUS_RD_DISCARD_EN(x)                (((x) & 0x1) << 24)
+#define   G_000030_BUS_RD_DISCARD_EN(x)                (((x) >> 24) & 0x1)
+#define   C_000030_BUS_RD_DISCARD_EN                   0xFEFFFFFF
+#define   S_000030_ENFRCWRDY(x)                        (((x) & 0x1) << 25)
+#define   G_000030_ENFRCWRDY(x)                        (((x) >> 25) & 0x1)
+#define   C_000030_ENFRCWRDY                           0xFDFFFFFF
+#define   S_000030_BUS_MSTR_WS(x)                      (((x) & 0x1) << 26)
+#define   G_000030_BUS_MSTR_WS(x)                      (((x) >> 26) & 0x1)
+#define   C_000030_BUS_MSTR_WS                         0xFBFFFFFF
+#define   S_000030_BUS_PARKING_DIS(x)                  (((x) & 0x1) << 27)
+#define   G_000030_BUS_PARKING_DIS(x)                  (((x) >> 27) & 0x1)
+#define   C_000030_BUS_PARKING_DIS                     0xF7FFFFFF
+#define   S_000030_BUS_MSTR_DISCONNECT_EN(x)           (((x) & 0x1) << 28)
+#define   G_000030_BUS_MSTR_DISCONNECT_EN(x)           (((x) >> 28) & 0x1)
+#define   C_000030_BUS_MSTR_DISCONNECT_EN              0xEFFFFFFF
+#define   S_000030_SERR_EN(x)                          (((x) & 0x1) << 29)
+#define   G_000030_SERR_EN(x)                          (((x) >> 29) & 0x1)
+#define   C_000030_SERR_EN                             0xDFFFFFFF
+#define   S_000030_BUS_READ_BURST(x)                   (((x) & 0x1) << 30)
+#define   G_000030_BUS_READ_BURST(x)                   (((x) >> 30) & 0x1)
+#define   C_000030_BUS_READ_BURST                      0xBFFFFFFF
+#define   S_000030_BUS_RDY_READ_DLY(x)                 (((x) & 0x1) << 31)
+#define   G_000030_BUS_RDY_READ_DLY(x)                 (((x) >> 31) & 0x1)
+#define   C_000030_BUS_RDY_READ_DLY                    0x7FFFFFFF
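These field accessors follow the driver's existing convention: S_<reg>_<FIELD>(x) shifts a value into position, G_<reg>_<FIELD>(x) extracts it, and C_<reg>_<FIELD> is an AND-mask with the field cleared, so read-modify-write sequences compose naturally. The soft-reset write in r100_asic_reset() above is one real user; a generic sketch of the pattern (the BUS_RETRY_WS value is purely illustrative) looks like this:

/* Illustrative read-modify-write using the C_/S_ macro pair. */
static void example_set_bus_retry_ws(struct radeon_device *rdev, u32 ws)
{
	u32 tmp = RREG32(R_000030_BUS_CNTL);

	tmp &= C_000030_BUS_RETRY_WS;		/* clear the 4-bit BUS_RETRY_WS field */
	tmp |= S_000030_BUS_RETRY_WS(ws);	/* install the new wait-state count */
	WREG32(R_000030_BUS_CNTL, tmp);
}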
 #define R_000040_GEN_INT_CNTL                        0x000040
 #define   S_000040_CRTC_VBLANK(x)                      (((x) & 0x1) << 0)
 #define   G_000040_CRTC_VBLANK(x)                      (((x) >> 0) & 0x1)
@@ -710,5 +838,41 @@
 #define   G_00000D_FORCE_RB(x)                         (((x) >> 28) & 0x1)
 #define   C_00000D_FORCE_RB                            0xEFFFFFFF
 
+/* PLL regs */
+#define SCLK_CNTL                                      0xd
+#define   FORCE_HDP                                    (1 << 17)
+#define CLK_PWRMGT_CNTL                                0x14
+#define   GLOBAL_PMAN_EN                               (1 << 10)
+#define   DISP_PM                                      (1 << 20)
+#define PLL_PWRMGT_CNTL                                0x15
+#define   MPLL_TURNOFF                                 (1 << 0)
+#define   SPLL_TURNOFF                                 (1 << 1)
+#define   PPLL_TURNOFF                                 (1 << 2)
+#define   P2PLL_TURNOFF                                (1 << 3)
+#define   TVPLL_TURNOFF                                (1 << 4)
+#define   MOBILE_SU                                    (1 << 16)
+#define   SU_SCLK_USE_BCLK                             (1 << 17)
+#define SCLK_CNTL2                                     0x1e
+#define   REDUCED_SPEED_SCLK_MODE                      (1 << 16)
+#define   REDUCED_SPEED_SCLK_SEL(x)                    ((x) << 17)
+#define MCLK_MISC                                      0x1f
+#define   EN_MCLK_TRISTATE_IN_SUSPEND                  (1 << 18)
+#define SCLK_MORE_CNTL                                 0x35
+#define   REDUCED_SPEED_SCLK_EN                        (1 << 16)
+#define   IO_CG_VOLTAGE_DROP                           (1 << 17)
+#define   VOLTAGE_DELAY_SEL(x)                         ((x) << 20)
+#define   VOLTAGE_DROP_SYNC                            (1 << 19)
+
+/* mmreg */
+#define DISP_PWR_MAN                                   0xd08
+#define   DISP_D3_GRPH_RST                             (1 << 18)
+#define   DISP_D3_SUBPIC_RST                           (1 << 19)
+#define   DISP_D3_OV0_RST                              (1 << 20)
+#define   DISP_D1D2_GRPH_RST                           (1 << 21)
+#define   DISP_D1D2_SUBPIC_RST                         (1 << 22)
+#define   DISP_D1D2_OV0_RST                            (1 << 23)
+#define   DISP_DVO_ENABLE_RST                          (1 << 24)
+#define   TV_ENABLE_RST                                (1 << 25)
+#define   AUTO_PWRUP_EN                                (1 << 26)
 
 #endif
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index a5ff807..b2f9efe 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -27,8 +27,9 @@
  */
 #include <linux/seq_file.h>
 #include <linux/slab.h>
-#include "drmP.h"
-#include "drm.h"
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
 #include "radeon_reg.h"
 #include "radeon.h"
 #include "radeon_asic.h"
@@ -151,6 +152,10 @@
 	u32 tmp;
 	int r;
 
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
 	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
 	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
@@ -323,7 +328,6 @@
 {
 	uint32_t gb_tile_config, tmp;
 
-	r100_hdp_reset(rdev);
 	if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
 	    (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
 		/* r300,r350 */
@@ -375,89 +379,85 @@
 		 rdev->num_gb_pipes, rdev->num_z_pipes);
 }
 
-int r300_ga_reset(struct radeon_device *rdev)
+bool r300_gpu_is_lockup(struct radeon_device *rdev)
 {
-	uint32_t tmp;
-	bool reinit_cp;
-	int i;
+	u32 rbbm_status;
+	int r;
 
-	reinit_cp = rdev->cp.ready;
-	rdev->cp.ready = false;
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		WREG32(RADEON_CP_CSQ_MODE, 0);
-		WREG32(RADEON_CP_CSQ_CNTL, 0);
-		WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
-		(void)RREG32(RADEON_RBBM_SOFT_RESET);
-		udelay(200);
-		WREG32(RADEON_RBBM_SOFT_RESET, 0);
-		/* Wait to prevent race in RBBM_STATUS */
-		mdelay(1);
-		tmp = RREG32(RADEON_RBBM_STATUS);
-		if (tmp & ((1 << 20) | (1 << 26))) {
-			DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
-			/* GA still busy soft reset it */
-			WREG32(0x429C, 0x200);
-			WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
-			WREG32(R300_RE_SCISSORS_TL, 0);
-			WREG32(R300_RE_SCISSORS_BR, 0);
-			WREG32(0x24AC, 0);
-		}
-		/* Wait to prevent race in RBBM_STATUS */
-		mdelay(1);
-		tmp = RREG32(RADEON_RBBM_STATUS);
-		if (!(tmp & ((1 << 20) | (1 << 26)))) {
-			break;
-		}
+	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+		return false;
 	}
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(RADEON_RBBM_STATUS);
-		if (!(tmp & ((1 << 20) | (1 << 26)))) {
-			DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
-				 tmp);
-			if (reinit_cp) {
-				return r100_cp_init(rdev, rdev->cp.ring_size);
-			}
-			return 0;
-		}
-		DRM_UDELAY(1);
+	/* force CP activities */
+	r = radeon_ring_lock(rdev, 2);
+	if (!r) {
+		/* PACKET2 NOP */
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_unlock_commit(rdev);
 	}
-	tmp = RREG32(RADEON_RBBM_STATUS);
-	DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
-	return -1;
+	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
 }
 
-int r300_gpu_reset(struct radeon_device *rdev)
+int r300_asic_reset(struct radeon_device *rdev)
 {
-	uint32_t status;
+	struct r100_mc_save save;
+	u32 status, tmp;
 
-	/* reset order likely matter */
-	status = RREG32(RADEON_RBBM_STATUS);
-	/* reset HDP */
-	r100_hdp_reset(rdev);
-	/* reset rb2d */
-	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
-		r100_rb2d_reset(rdev);
+	r100_mc_stop(rdev, &save);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(status)) {
+		return 0;
 	}
-	/* reset GA */
-	if (status & ((1 << 20) | (1 << 26))) {
-		r300_ga_reset(rdev);
-	}
-	/* reset CP */
-	status = RREG32(RADEON_RBBM_STATUS);
-	if (status & (1 << 16)) {
-		r100_cp_reset(rdev);
-	}
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* stop CP */
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	WREG32(RADEON_CP_RB_WPTR, 0);
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	/* save PCI state */
+	pci_save_state(rdev->pdev);
+	/* disable bus mastering */
+	r100_bm_disable(rdev);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+					S_0000F0_SOFT_RESET_GA(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* resetting the CP seems to be problematic: sometimes it ends up
+	 * hard-locking the computer, but it is necessary for a successful
+	 * reset. More testing and experimenting is needed on R3XX/R4XX to
+	 * find a reliable solution (if one exists).
+	 */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* restore PCI & busmastering */
+	pci_restore_state(rdev->pdev);
+	r100_enable_bm(rdev);
 	/* Check if GPU is idle */
-	status = RREG32(RADEON_RBBM_STATUS);
-	if (status & RADEON_RBBM_ACTIVE) {
-		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+		dev_err(rdev->dev, "failed to reset GPU\n");
+		rdev->gpu_lockup = true;
 		return -1;
 	}
-	DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+	r100_mc_resume(rdev, &save);
+	dev_info(rdev->dev, "GPU reset succeeded\n");
 	return 0;
 }
 
-
 /*
  * r300,r350,rv350,rv380 VRAM info
  */
@@ -1316,7 +1316,7 @@
 	/* Resume clock before doing reset */
 	r300_clock_startup(rdev);
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
 			RREG32(R_0007C0_CP_STAT));
@@ -1344,7 +1344,6 @@
 
 void r300_fini(struct radeon_device *rdev)
 {
-	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -1387,7 +1386,7 @@
 			return r;
 	}
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev,
 			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
@@ -1400,8 +1399,6 @@
 	r300_errata(rdev);
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
-	/* Initialize power management */
-	radeon_pm_init(rdev);
 	/* initialize AGP */
 	if (rdev->flags & RADEON_IS_AGP) {
 		r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 4c73114..968a333 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -209,7 +209,52 @@
 #define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
 #define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
 #define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
-
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                   (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                   (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                      0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                    (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                    (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                       0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                  (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                  (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                     0xFFFFBFFF
 
 #define R_00000D_SCLK_CNTL                           0x00000D
 #define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
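
The R_/S_/G_/C_ definitions added above follow the driver's usual register helper convention: S_* shifts a field value into position, G_* extracts the field from a register value, and C_* is the AND-mask that clears it for a read-modify-write. A minimal, illustrative sketch of the pattern using the SOFT_RESET_CP field (the local variable name is ours, not from the patch):

	u32 reset;

	reset = RREG32(R_0000F0_RBBM_SOFT_RESET);
	reset &= C_0000F0_SOFT_RESET_CP;	/* clear the CP field */
	reset |= S_0000F0_SOFT_RESET_CP(1);	/* request a CP soft reset */
	WREG32(R_0000F0_RBBM_SOFT_RESET, reset);
	if (G_0000F0_SOFT_RESET_CP(RREG32(R_0000F0_RBBM_SOFT_RESET)))
		;	/* the CP reset request is still asserted */
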
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c2bda4a..4415a5e 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -36,6 +36,35 @@
 #include "r420d.h"
 #include "r420_reg_safe.h"
 
+void r420_pm_init_profile(struct radeon_device *rdev)
+{
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+	/* low sh */
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+	/* high sh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+	/* low mh */
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+	/* high mh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
 static void r420_set_reg_safe(struct radeon_device *rdev)
 {
 	rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
@@ -241,7 +270,7 @@
 	/* Resume clock before doing reset */
 	r420_clock_resume(rdev);
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
 			RREG32(R_0007C0_CP_STAT));
@@ -274,7 +303,6 @@
 
 void r420_fini(struct radeon_device *rdev)
 {
-	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -322,7 +350,7 @@
 		}
 	}
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev,
 			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
@@ -334,8 +362,6 @@
 
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
-	/* Initialize power management */
-	radeon_pm_init(rdev);
 	/* initialize AGP */
 	if (rdev->flags & RADEON_IS_AGP) {
 		r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 0cf2ad2..93c9a2b 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -347,9 +347,11 @@
 
 #define AVIVO_D1CRTC_CONTROL                                    0x6080
 #       define AVIVO_CRTC_EN                                    (1 << 0)
+#       define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE             (1 << 24)
 #define AVIVO_D1CRTC_BLANK_CONTROL                              0x6084
 #define AVIVO_D1CRTC_INTERLACE_CONTROL                          0x6088
 #define AVIVO_D1CRTC_INTERLACE_STATUS                           0x608c
+#define AVIVO_D1CRTC_STATUS_POSITION                            0x60a0
 #define AVIVO_D1CRTC_FRAME_COUNT                                0x60a4
 #define AVIVO_D1CRTC_STEREO_CONTROL                             0x60c4
 
@@ -488,6 +490,7 @@
 #define AVIVO_D2CRTC_BLANK_CONTROL                              0x6884
 #define AVIVO_D2CRTC_INTERLACE_CONTROL                          0x6888
 #define AVIVO_D2CRTC_INTERLACE_STATUS                           0x688c
+#define AVIVO_D2CRTC_STATUS_POSITION                            0x68a0
 #define AVIVO_D2CRTC_FRAME_COUNT                                0x68a4
 #define AVIVO_D2CRTC_STEREO_CONTROL                             0x68c4
 
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3c44b8d..34330df 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -53,7 +53,6 @@
 {
 	unsigned pipe_select_current, gb_pipe_select, tmp;
 
-	r100_hdp_reset(rdev);
 	rv515_vga_render_disable(rdev);
 	/*
 	 * DST_PIPE_CONFIG		0x170C
@@ -209,7 +208,7 @@
 	/* Resume clock before doing reset */
 	rv515_clock_startup(rdev);
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
 			RREG32(R_0007C0_CP_STAT));
@@ -246,7 +245,7 @@
 		return -EINVAL;
 	}
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev,
 			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
@@ -262,8 +261,6 @@
 	}
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
-	/* Initialize power management */
-	radeon_pm_init(rdev);
 	/* initialize AGP */
 	if (rdev->flags & RADEON_IS_AGP) {
 		r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 8f3454e..44e96a2 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -44,6 +44,9 @@
 #define R700_PFP_UCODE_SIZE 848
 #define R700_PM4_UCODE_SIZE 1360
 #define R700_RLC_UCODE_SIZE 1024
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
 
 /* Firmware Names */
 MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -68,6 +71,18 @@
 MODULE_FIRMWARE("radeon/RV710_me.bin");
 MODULE_FIRMWARE("radeon/R600_rlc.bin");
 MODULE_FIRMWARE("radeon/R700_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
+MODULE_FIRMWARE("radeon/CEDAR_me.bin");
+MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
 
 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
 
@@ -75,6 +90,401 @@
 int r600_mc_wait_for_idle(struct radeon_device *rdev);
 void r600_gpu_init(struct radeon_device *rdev);
 void r600_fini(struct radeon_device *rdev);
+void r600_irq_disable(struct radeon_device *rdev);
+
+void r600_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+	int i;
+
+	rdev->pm.dynpm_can_upclock = true;
+	rdev->pm.dynpm_can_downclock = true;
+
+	/* power state array is low to high, default is first */
+	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
+		int min_power_state_index = 0;
+
+		if (rdev->pm.num_power_states > 2)
+			min_power_state_index = 1;
+
+		switch (rdev->pm.dynpm_planned_action) {
+		case DYNPM_ACTION_MINIMUM:
+			rdev->pm.requested_power_state_index = min_power_state_index;
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_downclock = false;
+			break;
+		case DYNPM_ACTION_DOWNCLOCK:
+			if (rdev->pm.current_power_state_index == min_power_state_index) {
+				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+				rdev->pm.dynpm_can_downclock = false;
+			} else {
+				if (rdev->pm.active_crtc_count > 1) {
+					for (i = 0; i < rdev->pm.num_power_states; i++) {
+						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+							continue;
+						else if (i >= rdev->pm.current_power_state_index) {
+							rdev->pm.requested_power_state_index =
+								rdev->pm.current_power_state_index;
+							break;
+						} else {
+							rdev->pm.requested_power_state_index = i;
+							break;
+						}
+					}
+				} else
+					rdev->pm.requested_power_state_index =
+						rdev->pm.current_power_state_index - 1;
+			}
+			rdev->pm.requested_clock_mode_index = 0;
+			/* don't use the power state if crtcs are active and no display flag is set */
+			if ((rdev->pm.active_crtc_count > 0) &&
+			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+			     clock_info[rdev->pm.requested_clock_mode_index].flags &
+			     RADEON_PM_MODE_NO_DISPLAY)) {
+				rdev->pm.requested_power_state_index++;
+			}
+			break;
+		case DYNPM_ACTION_UPCLOCK:
+			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+				rdev->pm.dynpm_can_upclock = false;
+			} else {
+				if (rdev->pm.active_crtc_count > 1) {
+					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+							continue;
+						else if (i <= rdev->pm.current_power_state_index) {
+							rdev->pm.requested_power_state_index =
+								rdev->pm.current_power_state_index;
+							break;
+						} else {
+							rdev->pm.requested_power_state_index = i;
+							break;
+						}
+					}
+				} else
+					rdev->pm.requested_power_state_index =
+						rdev->pm.current_power_state_index + 1;
+			}
+			rdev->pm.requested_clock_mode_index = 0;
+			break;
+		case DYNPM_ACTION_DEFAULT:
+			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_upclock = false;
+			break;
+		case DYNPM_ACTION_NONE:
+		default:
+			DRM_ERROR("Requested mode for not defined action\n");
+			return;
+		}
+	} else {
+		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
+		/* for now just select the first power state and switch between clock modes */
+		/* power state array is low to high, default is first (0) */
+		if (rdev->pm.active_crtc_count > 1) {
+			rdev->pm.requested_power_state_index = -1;
+			/* start at 1 as we don't want the default mode */
+			for (i = 1; i < rdev->pm.num_power_states; i++) {
+				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+					continue;
+				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
+					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
+					rdev->pm.requested_power_state_index = i;
+					break;
+				}
+			}
+			/* if nothing selected, grab the default state. */
+			if (rdev->pm.requested_power_state_index == -1)
+				rdev->pm.requested_power_state_index = 0;
+		} else
+			rdev->pm.requested_power_state_index = 1;
+
+		switch (rdev->pm.dynpm_planned_action) {
+		case DYNPM_ACTION_MINIMUM:
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_downclock = false;
+			break;
+		case DYNPM_ACTION_DOWNCLOCK:
+			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+				if (rdev->pm.current_clock_mode_index == 0) {
+					rdev->pm.requested_clock_mode_index = 0;
+					rdev->pm.dynpm_can_downclock = false;
+				} else
+					rdev->pm.requested_clock_mode_index =
+						rdev->pm.current_clock_mode_index - 1;
+			} else {
+				rdev->pm.requested_clock_mode_index = 0;
+				rdev->pm.dynpm_can_downclock = false;
+			}
+			/* don't use the power state if crtcs are active and no display flag is set */
+			if ((rdev->pm.active_crtc_count > 0) &&
+			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+			     clock_info[rdev->pm.requested_clock_mode_index].flags &
+			     RADEON_PM_MODE_NO_DISPLAY)) {
+				rdev->pm.requested_clock_mode_index++;
+			}
+			break;
+		case DYNPM_ACTION_UPCLOCK:
+			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+				if (rdev->pm.current_clock_mode_index ==
+				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
+					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
+					rdev->pm.dynpm_can_upclock = false;
+				} else
+					rdev->pm.requested_clock_mode_index =
+						rdev->pm.current_clock_mode_index + 1;
+			} else {
+				rdev->pm.requested_clock_mode_index =
+					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
+				rdev->pm.dynpm_can_upclock = false;
+			}
+			break;
+		case DYNPM_ACTION_DEFAULT:
+			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_upclock = false;
+			break;
+		case DYNPM_ACTION_NONE:
+		default:
+			DRM_ERROR("Requested mode for not defined action\n");
+			return;
+		}
+	}
+
+	DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  pcie_lanes);
+}
+
+static int r600_pm_get_type_index(struct radeon_device *rdev,
+				  enum radeon_pm_state_type ps_type,
+				  int instance)
+{
+	int i;
+	int found_instance = -1;
+
+	for (i = 0; i < rdev->pm.num_power_states; i++) {
+		if (rdev->pm.power_state[i].type == ps_type) {
+			found_instance++;
+			if (found_instance == instance)
+				return i;
+		}
+	}
+	/* return default if no match */
+	return rdev->pm.default_power_state_index;
+}
+
+void rs780_pm_init_profile(struct radeon_device *rdev)
+{
+	if (rdev->pm.num_power_states == 2) {
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	} else if (rdev->pm.num_power_states == 3) {
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	} else {
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	}
+}
+
+void r600_pm_init_profile(struct radeon_device *rdev)
+{
+	if (rdev->family == CHIP_R600) {
+		/* XXX */
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	} else {
+		if (rdev->pm.num_power_states < 4) {
+			/* default */
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+			/* low sh */
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+			/* high sh */
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+			/* low mh */
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
+			/* high mh */
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+		} else {
+			/* default */
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+			/* low sh */
+			if (rdev->flags & RADEON_IS_MOBILITY) {
+				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+			} else {
+				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+			}
+			/* high sh */
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
+				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
+				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+			/* low mh */
+			if (rdev->flags & RADEON_IS_MOBILITY) {
+				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2;
+			} else {
+				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
+			}
+			/* high mh */
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
+				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
+				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+		}
+	}
+}
+
+void r600_pm_misc(struct radeon_device *rdev)
+{
+
+}
+
+bool r600_gui_idle(struct radeon_device *rdev)
+{
+	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
+		return false;
+	else
+		return true;
+}
 
 /* hpd for digital panel detect/disconnect */
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
@@ -714,11 +1124,6 @@
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	/* FIXME remove this once we support unmappable VRAM */
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-		rdev->mc.real_vram_size = rdev->mc.aper_size;
-	}
 	r600_vram_gtt_location(rdev, &rdev->mc);
 
 	if (rdev->flags & RADEON_IS_IGP)
@@ -750,7 +1155,6 @@
 			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
 			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
 			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
-	u32 srbm_reset = 0;
 	u32 tmp;
 
 	dev_info(rdev->dev, "GPU softreset \n");
@@ -765,7 +1169,7 @@
 		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
 	}
 	/* Disable CP parsing/prefetching */
-	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
+	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
 	/* Check if any of the rendering block is busy and reset it */
 	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
 	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@@ -784,72 +1188,56 @@
 			S_008020_SOFT_RESET_VGT(1);
 		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
 		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
-		(void)RREG32(R_008020_GRBM_SOFT_RESET);
-		udelay(50);
+		RREG32(R_008020_GRBM_SOFT_RESET);
+		mdelay(15);
 		WREG32(R_008020_GRBM_SOFT_RESET, 0);
-		(void)RREG32(R_008020_GRBM_SOFT_RESET);
 	}
 	/* Reset CP (we always reset CP) */
 	tmp = S_008020_SOFT_RESET_CP(1);
 	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
 	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
-	(void)RREG32(R_008020_GRBM_SOFT_RESET);
-	udelay(50);
+	RREG32(R_008020_GRBM_SOFT_RESET);
+	mdelay(15);
 	WREG32(R_008020_GRBM_SOFT_RESET, 0);
-	(void)RREG32(R_008020_GRBM_SOFT_RESET);
-	/* Reset others GPU block if necessary */
-	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
-	if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
-		srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
-	if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
-		srbm_reset |= S_000E60_SOFT_RESET_IH(1);
-	if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-		srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
-	if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-	if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-	if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-	if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-	if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
-	if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-		srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
-	if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-		srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
-	dev_info(rdev->dev, "  R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
-	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
-	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
-	udelay(50);
-	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
-	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
-	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
-	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
-	udelay(50);
-	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
-	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
 	/* Wait a little for things to settle down */
-	udelay(50);
+	mdelay(1);
 	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
 		RREG32(R_008010_GRBM_STATUS));
 	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
 		RREG32(R_008014_GRBM_STATUS2));
 	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
 		RREG32(R_000E50_SRBM_STATUS));
-	/* After reset we need to reinit the asic as GPU often endup in an
-	 * incoherent state.
-	 */
-	atom_asic_init(rdev->mode_info.atom_context);
 	rv515_mc_resume(rdev, &save);
 	return 0;
 }
 
-int r600_gpu_reset(struct radeon_device *rdev)
+bool r600_gpu_is_lockup(struct radeon_device *rdev)
+{
+	u32 srbm_status;
+	u32 grbm_status;
+	u32 grbm_status2;
+	int r;
+
+	srbm_status = RREG32(R_000E50_SRBM_STATUS);
+	grbm_status = RREG32(R_008010_GRBM_STATUS);
+	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
+	if (!G_008010_GUI_ACTIVE(grbm_status)) {
+		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+		return false;
+	}
+	/* force CP activities */
+	r = radeon_ring_lock(rdev, 2);
+	if (!r) {
+		/* PACKET2 NOP */
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_unlock_commit(rdev);
+	}
+	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+}
+
+int r600_asic_reset(struct radeon_device *rdev)
 {
 	return r600_gpu_soft_reset(rdev);
 }
@@ -1467,10 +1855,31 @@
 		chip_name = "RV710";
 		rlc_chip_name = "R700";
 		break;
+	case CHIP_CEDAR:
+		chip_name = "CEDAR";
+		rlc_chip_name = "CEDAR";
+		break;
+	case CHIP_REDWOOD:
+		chip_name = "REDWOOD";
+		rlc_chip_name = "REDWOOD";
+		break;
+	case CHIP_JUNIPER:
+		chip_name = "JUNIPER";
+		rlc_chip_name = "JUNIPER";
+		break;
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		chip_name = "CYPRESS";
+		rlc_chip_name = "CYPRESS";
+		break;
 	default: BUG();
 	}
 
-	if (rdev->family >= CHIP_RV770) {
+	if (rdev->family >= CHIP_CEDAR) {
+		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+	} else if (rdev->family >= CHIP_RV770) {
 		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
 		me_req_size = R700_PM4_UCODE_SIZE * 4;
 		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
@@ -1584,12 +1993,15 @@
 	}
 	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
 	radeon_ring_write(rdev, 0x1);
-	if (rdev->family < CHIP_RV770) {
-		radeon_ring_write(rdev, 0x3);
-		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
-	} else {
+	if (rdev->family >= CHIP_CEDAR) {
+		radeon_ring_write(rdev, 0x0);
+		radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+	} else if (rdev->family >= CHIP_RV770) {
 		radeon_ring_write(rdev, 0x0);
 		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
+	} else {
+		radeon_ring_write(rdev, 0x3);
+		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
 	}
 	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
 	radeon_ring_write(rdev, 0);
@@ -2051,8 +2463,6 @@
 	r = radeon_clocks_init(rdev);
 	if (r)
 		return r;
-	/* Initialize power management */
-	radeon_pm_init(rdev);
 	/* Fence driver */
 	r = radeon_fence_driver_init(rdev);
 	if (r)
@@ -2117,7 +2527,6 @@
 
 void r600_fini(struct radeon_device *rdev)
 {
-	radeon_pm_fini(rdev);
 	r600_audio_fini(rdev);
 	r600_blit_fini(rdev);
 	r600_cp_fini(rdev);
@@ -2290,10 +2699,11 @@
 	}
 }
 
-static void r600_rlc_stop(struct radeon_device *rdev)
+void r600_rlc_stop(struct radeon_device *rdev)
 {
 
-	if (rdev->family >= CHIP_RV770) {
+	if ((rdev->family >= CHIP_RV770) &&
+	    (rdev->family <= CHIP_RV740)) {
 		/* r7xx asics need to soft reset RLC before halting */
 		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
 		RREG32(SRBM_SOFT_RESET);
@@ -2330,7 +2740,12 @@
 	WREG32(RLC_UCODE_CNTL, 0);
 
 	fw_data = (const __be32 *)rdev->rlc_fw->data;
-	if (rdev->family >= CHIP_RV770) {
+	if (rdev->family >= CHIP_CEDAR) {
+		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	} else if (rdev->family >= CHIP_RV770) {
 		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
 			WREG32(RLC_UCODE_ADDR, i);
 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
@@ -2360,7 +2775,7 @@
 	rdev->ih.enabled = true;
 }
 
-static void r600_disable_interrupts(struct radeon_device *rdev)
+void r600_disable_interrupts(struct radeon_device *rdev)
 {
 	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
 	u32 ih_cntl = RREG32(IH_CNTL);
@@ -2475,7 +2890,10 @@
 	WREG32(IH_CNTL, ih_cntl);
 
 	/* force the active interrupt state to all disabled */
-	r600_disable_interrupt_state(rdev);
+	if (rdev->family >= CHIP_CEDAR)
+		evergreen_disable_interrupt_state(rdev);
+	else
+		r600_disable_interrupt_state(rdev);
 
 	/* enable irqs */
 	r600_enable_interrupts(rdev);
@@ -2485,7 +2903,7 @@
 
 void r600_irq_suspend(struct radeon_device *rdev)
 {
-	r600_disable_interrupts(rdev);
+	r600_irq_disable(rdev);
 	r600_rlc_stop(rdev);
 }
 
@@ -2500,6 +2918,8 @@
 	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
 	u32 mode_int = 0;
 	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+	u32 grbm_int_cntl = 0;
+	u32 hdmi1, hdmi2;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
@@ -2513,7 +2933,9 @@
 		return 0;
 	}
 
+	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
 	if (ASIC_IS_DCE3(rdev)) {
+		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
 		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
 		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
 		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2523,6 +2945,7 @@
 			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 		}
 	} else {
+		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
 		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
 		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
 		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2564,10 +2987,25 @@
 		DRM_DEBUG("r600_irq_set: hpd 6\n");
 		hpd6 |= DC_HPDx_INT_EN;
 	}
+	if (rdev->irq.hdmi[0]) {
+		DRM_DEBUG("r600_irq_set: hdmi 1\n");
+		hdmi1 |= R600_HDMI_INT_EN;
+	}
+	if (rdev->irq.hdmi[1]) {
+		DRM_DEBUG("r600_irq_set: hdmi 2\n");
+		hdmi2 |= R600_HDMI_INT_EN;
+	}
+	if (rdev->irq.gui_idle) {
+		DRM_DEBUG("gui idle\n");
+		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
+	}
 
 	WREG32(CP_INT_CNTL, cp_int_cntl);
 	WREG32(DxMODE_INT_MASK, mode_int);
+	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
 	if (ASIC_IS_DCE3(rdev)) {
+		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
 		WREG32(DC_HPD1_INT_CONTROL, hpd1);
 		WREG32(DC_HPD2_INT_CONTROL, hpd2);
 		WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -2577,6 +3015,7 @@
 			WREG32(DC_HPD6_INT_CONTROL, hpd6);
 		}
 	} else {
+		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
 		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
 		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
 		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
@@ -2660,6 +3099,18 @@
 			WREG32(DC_HPD6_INT_CONTROL, tmp);
 		}
 	}
+	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+		WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+	}
+	if (ASIC_IS_DCE3(rdev)) {
+		if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+		}
+	} else {
+		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+		}
+	}
 }
 
 void r600_irq_disable(struct radeon_device *rdev)
@@ -2713,6 +3164,8 @@
  *     19         1  FP Hot plug detection B
  *     19         2  DAC A auto-detection
  *     19         3  DAC B auto-detection
+ *     21         4  HDMI block A
+ *     21         5  HDMI block B
  *    176         -  CP_INT RB
  *    177         -  CP_INT IB1
  *    178         -  CP_INT IB2
@@ -2852,6 +3305,10 @@
 				break;
 			}
 			break;
+		case 21: /* HDMI */
+			DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
+			r600_audio_schedule_polling(rdev);
+			break;
 		case 176: /* CP_INT in ring buffer */
 		case 177: /* CP_INT in IB1 */
 		case 178: /* CP_INT in IB2 */
@@ -2861,6 +3318,11 @@
 		case 181: /* CP EOP event */
 			DRM_DEBUG("IH: CP EOP\n");
 			break;
+		case 233: /* GUI IDLE */
+			DRM_DEBUG("IH: CP EOP\n");
+			rdev->pm.gui_idle = true;
+			wake_up(&rdev->irq.idle_queue);
+			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 			break;
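
r600_gpu_is_lockup() above relies on r100_gpu_lockup_update()/r100_gpu_cp_is_lockup(), which are not part of this hunk; the heuristic is simply rptr-progress tracking against the fields of struct r100_gpu_lockup. A hedged sketch of that check (the function name and the 10 second grace period are assumptions):

	static bool cp_rptr_stalled(struct r100_gpu_lockup *lockup, u32 rptr)
	{
		if (rptr != lockup->last_cp_rptr) {
			/* the CP is still consuming the ring: record progress */
			lockup->last_cp_rptr = rptr;
			lockup->last_jiffies = jiffies;
			return false;
		}
		/* no forward progress: declare a lockup only after a grace period */
		return time_after(jiffies, lockup->last_jiffies + 10 * HZ);
	}
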
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 1d89805..2b26553 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -44,7 +44,7 @@
 /*
  * current number of channels
  */
-static int r600_audio_channels(struct radeon_device *rdev)
+int r600_audio_channels(struct radeon_device *rdev)
 {
 	return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
 }
@@ -52,7 +52,7 @@
 /*
  * current bits per sample
  */
-static int r600_audio_bits_per_sample(struct radeon_device *rdev)
+int r600_audio_bits_per_sample(struct radeon_device *rdev)
 {
 	uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
 	switch (value) {
@@ -71,7 +71,7 @@
 /*
  * current sampling rate in HZ
  */
-static int r600_audio_rate(struct radeon_device *rdev)
+int r600_audio_rate(struct radeon_device *rdev)
 {
 	uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
 	uint32_t result;
@@ -90,7 +90,7 @@
 /*
  * iec 60958 status bits
  */
-static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
+uint8_t r600_audio_status_bits(struct radeon_device *rdev)
 {
 	return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
 }
@@ -98,12 +98,21 @@
 /*
  * iec 60958 category code
  */
-static uint8_t r600_audio_category_code(struct radeon_device *rdev)
+uint8_t r600_audio_category_code(struct radeon_device *rdev)
 {
 	return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
 }
 
 /*
+ * schedule next audio update event
+ */
+void r600_audio_schedule_polling(struct radeon_device *rdev)
+{
+	mod_timer(&rdev->audio_timer,
+		jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+}
+
+/*
  * update all hdmi interfaces with current audio parameters
  */
 static void r600_audio_update_hdmi(unsigned long param)
@@ -118,7 +127,7 @@
 	uint8_t category_code = r600_audio_category_code(rdev);
 
 	struct drm_encoder *encoder;
-	int changes = 0;
+	int changes = 0, still_going = 0;
 
 	changes |= channels != rdev->audio_channels;
 	changes |= rate != rdev->audio_rate;
@@ -135,15 +144,13 @@
 	}
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+		still_going |= radeon_encoder->audio_polling_active;
 		if (changes || r600_hdmi_buffer_status_changed(encoder))
-			r600_hdmi_update_audio_settings(
-				encoder, channels,
-				rate, bps, status_bits,
-				category_code);
+			r600_hdmi_update_audio_settings(encoder);
 	}
 
-	mod_timer(&rdev->audio_timer,
-		jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+	if (still_going)
+		r600_audio_schedule_polling(rdev);
 }
 
 /*
@@ -176,12 +183,37 @@
 		r600_audio_update_hdmi,
 		(unsigned long)rdev);
 
-	mod_timer(&rdev->audio_timer, jiffies + 1);
-
 	return 0;
 }
 
 /*
+ * enable the polling timer, to check for status changes
+ */
+void r600_audio_enable_polling(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	DRM_DEBUG("r600_audio_enable_polling: %d", radeon_encoder->audio_polling_active);
+	if (radeon_encoder->audio_polling_active)
+		return;
+
+	radeon_encoder->audio_polling_active = 1;
+	mod_timer(&rdev->audio_timer, jiffies + 1);
+}
+
+/*
+ * disable the polling timer, so we get no more status updates
+ */
+void r600_audio_disable_polling(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	DRM_DEBUG("r600_audio_disable_polling: %d", radeon_encoder->audio_polling_active);
+	radeon_encoder->audio_polling_active = 0;
+}
+
+/*
  * attach the audio codec to the clock source of the encoder
  */
 void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index f6c6c77..d13622a 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -447,6 +447,9 @@
 	u32 packet2s[16];
 	int num_packet2s = 0;
 
+	/* don't reinitialize blit */
+	if (rdev->r600_blit.shader_obj)
+		return 0;
 	mutex_init(&rdev->r600_blit.mutex);
 	rdev->r600_blit.state_offset = 0;
 
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 2616b82..26b4bc9 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -290,17 +290,15 @@
 	if (!offset)
 		return;
 
-	if (r600_hdmi_is_audio_buffer_filled(encoder)) {
-		/* disable audio workaround and start delivering of audio frames */
+	if (!radeon_encoder->hdmi_audio_workaround ||
+		r600_hdmi_is_audio_buffer_filled(encoder)) {
+
+		/* disable audio workaround */
 		WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
 
-	} else if (radeon_encoder->hdmi_audio_workaround) {
-		/* enable audio workaround and start delivering of audio frames */
-		WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
-
 	} else {
-		/* disable audio workaround and stop delivering of audio frames */
-		WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
+		/* enable audio workaround */
+		WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
 	}
 }
 
@@ -345,25 +343,23 @@
 
 	/* audio packets per line, does anyone know how to calc this ? */
 	WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
-
-	/* update? reset? don't realy know */
-	WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
 }
 
 /*
  * update settings with current parameters from audio engine
  */
-void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
-				     int channels,
-				     int rate,
-				     int bps,
-				     uint8_t status_bits,
-				     uint8_t category_code)
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
 
+	int channels = r600_audio_channels(rdev);
+	int rate = r600_audio_rate(rdev);
+	int bps = r600_audio_bits_per_sample(rdev);
+	uint8_t status_bits = r600_audio_status_bits(rdev);
+	uint8_t category_code = r600_audio_category_code(rdev);
+
 	uint32_t iec;
 
 	if (!offset)
@@ -415,9 +411,6 @@
 	r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
 
 	r600_hdmi_audio_workaround(encoder);
-
-	/* update? reset? don't realy know */
-	WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
 }
 
 static int r600_hdmi_find_free_block(struct drm_device *dev)
@@ -486,6 +479,7 @@
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t offset;
 
 	if (ASIC_IS_DCE4(rdev))
 		return;
@@ -499,10 +493,10 @@
 		}
 	}
 
+	offset = radeon_encoder->hdmi_offset;
 	if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
 		WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
 	} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
-		int offset = radeon_encoder->hdmi_offset;
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
 			WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
@@ -518,6 +512,21 @@
 		}
 	}
 
+	if (rdev->irq.installed
+	    && rdev->family != CHIP_RS600
+	    && rdev->family != CHIP_RS690
+	    && rdev->family != CHIP_RS740) {
+
+		/* if irq is available use it */
+		rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
+		radeon_irq_set(rdev);
+
+		r600_audio_disable_polling(encoder);
+	} else {
+		/* if not fallback to polling */
+		r600_audio_enable_polling(encoder);
+	}
+
 	DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
 		radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
 }
@@ -530,22 +539,30 @@
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t offset;
 
 	if (ASIC_IS_DCE4(rdev))
 		return;
 
-	if (!radeon_encoder->hdmi_offset) {
+	offset = radeon_encoder->hdmi_offset;
+	if (!offset) {
 		dev_err(rdev->dev, "Disabling not enabled HDMI\n");
 		return;
 	}
 
 	DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
-		radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+		offset, radeon_encoder->encoder_id);
+
+	/* disable irq */
+	rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = false;
+	radeon_irq_set(rdev);
+
+	/* disable polling */
+	r600_audio_disable_polling(encoder);
 
 	if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
 		WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
 	} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
-		int offset = radeon_encoder->hdmi_offset;
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
 			WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 7b1d223..d84612a 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -157,33 +157,36 @@
 #define R600_HDMI_BLOCK3                  0x7800
 
 /* HDMI registers */
-#define R600_HDMI_ENABLE           0x00
-#define R600_HDMI_STATUS           0x04
-#define R600_HDMI_CNTL             0x08
-#define R600_HDMI_UNKNOWN_0        0x0C
-#define R600_HDMI_AUDIOCNTL        0x10
-#define R600_HDMI_VIDEOCNTL        0x14
-#define R600_HDMI_VERSION          0x18
-#define R600_HDMI_UNKNOWN_1        0x28
-#define R600_HDMI_VIDEOINFOFRAME_0 0x54
-#define R600_HDMI_VIDEOINFOFRAME_1 0x58
-#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
-#define R600_HDMI_VIDEOINFOFRAME_3 0x60
-#define R600_HDMI_32kHz_CTS        0xac
-#define R600_HDMI_32kHz_N          0xb0
-#define R600_HDMI_44_1kHz_CTS      0xb4
-#define R600_HDMI_44_1kHz_N        0xb8
-#define R600_HDMI_48kHz_CTS        0xbc
-#define R600_HDMI_48kHz_N          0xc0
-#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
-#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
-#define R600_HDMI_IEC60958_1       0xd4
-#define R600_HDMI_IEC60958_2       0xd8
-#define R600_HDMI_UNKNOWN_2        0xdc
-#define R600_HDMI_AUDIO_DEBUG_0    0xe0
-#define R600_HDMI_AUDIO_DEBUG_1    0xe4
-#define R600_HDMI_AUDIO_DEBUG_2    0xe8
-#define R600_HDMI_AUDIO_DEBUG_3    0xec
+#define R600_HDMI_ENABLE                0x00
+#define R600_HDMI_STATUS                0x04
+#       define R600_HDMI_INT_PENDING    (1 << 29)
+#define R600_HDMI_CNTL                  0x08
+#       define R600_HDMI_INT_EN         (1 << 28)
+#       define R600_HDMI_INT_ACK        (1 << 29)
+#define R600_HDMI_UNKNOWN_0             0x0C
+#define R600_HDMI_AUDIOCNTL             0x10
+#define R600_HDMI_VIDEOCNTL             0x14
+#define R600_HDMI_VERSION               0x18
+#define R600_HDMI_UNKNOWN_1             0x28
+#define R600_HDMI_VIDEOINFOFRAME_0      0x54
+#define R600_HDMI_VIDEOINFOFRAME_1      0x58
+#define R600_HDMI_VIDEOINFOFRAME_2      0x5c
+#define R600_HDMI_VIDEOINFOFRAME_3      0x60
+#define R600_HDMI_32kHz_CTS             0xac
+#define R600_HDMI_32kHz_N               0xb0
+#define R600_HDMI_44_1kHz_CTS           0xb4
+#define R600_HDMI_44_1kHz_N             0xb8
+#define R600_HDMI_48kHz_CTS             0xbc
+#define R600_HDMI_48kHz_N               0xc0
+#define R600_HDMI_AUDIOINFOFRAME_0      0xcc
+#define R600_HDMI_AUDIOINFOFRAME_1      0xd0
+#define R600_HDMI_IEC60958_1            0xd4
+#define R600_HDMI_IEC60958_2            0xd8
+#define R600_HDMI_UNKNOWN_2             0xdc
+#define R600_HDMI_AUDIO_DEBUG_0         0xe0
+#define R600_HDMI_AUDIO_DEBUG_1         0xe4
+#define R600_HDMI_AUDIO_DEBUG_2         0xe8
+#define R600_HDMI_AUDIO_DEBUG_3         0xec
 
 /* HDMI additional config base register addresses */
 #define R600_HDMI_CONFIG1                 0x7600
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 034218c..66a37fb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -89,7 +89,6 @@
 extern int radeon_connector_table;
 extern int radeon_tv;
 extern int radeon_new_pll;
-extern int radeon_dynpm;
 extern int radeon_audio;
 extern int radeon_disp_priority;
 extern int radeon_hw_i2c;
@@ -99,6 +98,7 @@
  * symbol;
  */
 #define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT	(HZ / 2)
 /* RADEON_IB_POOL_SIZE must be a power of 2 */
 #define RADEON_IB_POOL_SIZE		16
 #define RADEON_DEBUGFS_MAX_NUM_FILES	32
@@ -172,6 +172,8 @@
 int radeon_pm_init(struct radeon_device *rdev);
 void radeon_pm_fini(struct radeon_device *rdev);
 void radeon_pm_compute_clocks(struct radeon_device *rdev);
+void radeon_pm_suspend(struct radeon_device *rdev);
+void radeon_pm_resume(struct radeon_device *rdev);
 void radeon_combios_get_power_modes(struct radeon_device *rdev);
 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
 
@@ -182,7 +184,8 @@
 	uint32_t			scratch_reg;
 	atomic_t			seq;
 	uint32_t			last_seq;
-	unsigned long			count_timeout;
+	unsigned long			last_jiffies;
+	unsigned long			last_timeout;
 	wait_queue_head_t		queue;
 	rwlock_t			lock;
 	struct list_head		created;
@@ -197,7 +200,6 @@
 	struct list_head		list;
 	/* protected by radeon_fence.lock */
 	uint32_t			seq;
-	unsigned long			timeout;
 	bool				emited;
 	bool				signaled;
 };
@@ -259,6 +261,7 @@
 	unsigned		rdomain;
 	unsigned		wdomain;
 	u32			tiling_flags;
+	bool			reserved;
 };
 
 /*
@@ -371,10 +374,15 @@
 	bool		installed;
 	bool		sw_int;
 	/* FIXME: use a define max crtc rather than hardcode it */
-	bool		crtc_vblank_int[2];
+	bool		crtc_vblank_int[6];
 	wait_queue_head_t	vblank_queue;
 	/* FIXME: use defines for max hpd/dacs */
 	bool            hpd[6];
+	bool            gui_idle;
+	bool            gui_idle_acked;
+	wait_queue_head_t	idle_queue;
+	/* FIXME: use defines for max HDMI blocks */
+	bool		hdmi[2];
 	spinlock_t sw_lock;
 	int sw_refcount;
 };
@@ -462,7 +470,9 @@
 extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
 /* Ring access between begin & end cannot sleep */
 void radeon_ring_free_size(struct radeon_device *rdev);
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
 int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev);
 void radeon_ring_unlock_commit(struct radeon_device *rdev);
 void radeon_ring_unlock_undo(struct radeon_device *rdev);
 int radeon_ring_test(struct radeon_device *rdev);
@@ -597,17 +607,24 @@
  * Equation between gpu/memory clock and available bandwidth is hw dependent
  * (type of memory, bus size, efficiency, ...)
  */
-enum radeon_pm_state {
-	PM_STATE_DISABLED,
-	PM_STATE_MINIMUM,
-	PM_STATE_PAUSED,
-	PM_STATE_ACTIVE
+
+enum radeon_pm_method {
+	PM_METHOD_PROFILE,
+	PM_METHOD_DYNPM,
 };
-enum radeon_pm_action {
-	PM_ACTION_NONE,
-	PM_ACTION_MINIMUM,
-	PM_ACTION_DOWNCLOCK,
-	PM_ACTION_UPCLOCK
+
+enum radeon_dynpm_state {
+	DYNPM_STATE_DISABLED,
+	DYNPM_STATE_MINIMUM,
+	DYNPM_STATE_PAUSED,
+	DYNPM_STATE_ACTIVE
+};
+enum radeon_dynpm_action {
+	DYNPM_ACTION_NONE,
+	DYNPM_ACTION_MINIMUM,
+	DYNPM_ACTION_DOWNCLOCK,
+	DYNPM_ACTION_UPCLOCK,
+	DYNPM_ACTION_DEFAULT
 };
 
 enum radeon_voltage_type {
@@ -625,11 +642,25 @@
 	POWER_STATE_TYPE_PERFORMANCE,
 };
 
-enum radeon_pm_clock_mode_type {
-	POWER_MODE_TYPE_DEFAULT,
-	POWER_MODE_TYPE_LOW,
-	POWER_MODE_TYPE_MID,
-	POWER_MODE_TYPE_HIGH,
+enum radeon_pm_profile_type {
+	PM_PROFILE_DEFAULT,
+	PM_PROFILE_AUTO,
+	PM_PROFILE_LOW,
+	PM_PROFILE_HIGH,
+};
+
+#define PM_PROFILE_DEFAULT_IDX 0
+#define PM_PROFILE_LOW_SH_IDX  1
+#define PM_PROFILE_HIGH_SH_IDX 2
+#define PM_PROFILE_LOW_MH_IDX  3
+#define PM_PROFILE_HIGH_MH_IDX 4
+#define PM_PROFILE_MAX         5
+
+struct radeon_pm_profile {
+	int dpms_off_ps_idx;
+	int dpms_on_ps_idx;
+	int dpms_off_cm_idx;
+	int dpms_on_cm_idx;
 };
 
 struct radeon_voltage {
@@ -646,12 +677,8 @@
 	u32 voltage;
 };
 
-struct radeon_pm_non_clock_info {
-	/* pcie lanes */
-	int pcie_lanes;
-	/* standardized non-clock flags */
-	u32 flags;
-};
+/* clock mode flags */
+#define RADEON_PM_MODE_NO_DISPLAY          (1 << 0)
 
 struct radeon_pm_clock_info {
 	/* memory clock */
@@ -660,10 +687,13 @@
 	u32 sclk;
 	/* voltage info */
 	struct radeon_voltage voltage;
-	/* standardized clock flags - not sure we'll need these */
+	/* standardized clock flags */
 	u32 flags;
 };
 
+/* state flags */
+#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
+
 struct radeon_power_state {
 	enum radeon_pm_state_type type;
 	/* XXX: use a define for num clock modes */
@@ -671,9 +701,11 @@
 	/* number of valid clock modes in this power state */
 	int num_clock_modes;
 	struct radeon_pm_clock_info *default_clock_mode;
-	/* non clock info about this state */
-	struct radeon_pm_non_clock_info non_clock_info;
-	bool voltage_drop_active;
+	/* standardized state flags */
+	u32 flags;
+	u32 misc; /* vbios specific flags */
+	u32 misc2; /* vbios specific flags */
+	int pcie_lanes; /* pcie lanes */
 };
 
 /*
@@ -683,14 +715,11 @@
 
 struct radeon_pm {
 	struct mutex		mutex;
-	struct delayed_work	idle_work;
-	enum radeon_pm_state	state;
-	enum radeon_pm_action	planned_action;
-	unsigned long		action_timeout;
-	bool 			downclocked;
-	int			active_crtcs;
+	u32			active_crtcs;
+	int			active_crtc_count;
 	int			req_vblank;
 	bool			vblank_sync;
+	bool			gui_idle;
 	fixed20_12		max_bandwidth;
 	fixed20_12		igp_sideport_mclk;
 	fixed20_12		igp_system_mclk;
@@ -707,12 +736,27 @@
 	struct radeon_power_state power_state[8];
 	/* number of valid power states */
 	int                     num_power_states;
-	struct radeon_power_state *current_power_state;
-	struct radeon_pm_clock_info *current_clock_mode;
-	struct radeon_power_state *requested_power_state;
-	struct radeon_pm_clock_info *requested_clock_mode;
-	struct radeon_power_state *default_power_state;
+	int                     current_power_state_index;
+	int                     current_clock_mode_index;
+	int                     requested_power_state_index;
+	int                     requested_clock_mode_index;
+	int                     default_power_state_index;
+	u32                     current_sclk;
+	u32                     current_mclk;
 	struct radeon_i2c_chan *i2c_bus;
+	/* selected pm method */
+	enum radeon_pm_method     pm_method;
+	/* dynpm power management */
+	struct delayed_work	dynpm_idle_work;
+	enum radeon_dynpm_state	dynpm_state;
+	enum radeon_dynpm_action	dynpm_planned_action;
+	unsigned long		dynpm_action_timeout;
+	bool                    dynpm_can_upclock;
+	bool                    dynpm_can_downclock;
+	/* profile-based power management */
+	enum radeon_pm_profile_type profile;
+	int                     profile_index;
+	struct radeon_pm_profile profiles[PM_PROFILE_MAX];
 };
 
 
@@ -746,7 +790,8 @@
 	int (*resume)(struct radeon_device *rdev);
 	int (*suspend)(struct radeon_device *rdev);
 	void (*vga_set_state)(struct radeon_device *rdev, bool state);
-	int (*gpu_reset)(struct radeon_device *rdev);
+	bool (*gpu_is_lockup)(struct radeon_device *rdev);
+	int (*asic_reset)(struct radeon_device *rdev);
 	void (*gart_tlb_flush)(struct radeon_device *rdev);
 	int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
 	int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
@@ -799,44 +844,84 @@
 	 * through ring.
 	 */
 	void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+	bool (*gui_idle)(struct radeon_device *rdev);
+	/* power management */
+	void (*pm_misc)(struct radeon_device *rdev);
+	void (*pm_prepare)(struct radeon_device *rdev);
+	void (*pm_finish)(struct radeon_device *rdev);
+	void (*pm_init_profile)(struct radeon_device *rdev);
+	void (*pm_get_dynpm_state)(struct radeon_device *rdev);
 };
 
 /*
  * Asic structures
  */
+struct r100_gpu_lockup {
+	unsigned long	last_jiffies;
+	u32		last_cp_rptr;
+};
+
 struct r100_asic {
-	const unsigned	*reg_safe_bm;
-	unsigned	reg_safe_bm_size;
-	u32		hdp_cntl;
+	const unsigned		*reg_safe_bm;
+	unsigned		reg_safe_bm_size;
+	u32			hdp_cntl;
+	struct r100_gpu_lockup	lockup;
 };
 
 struct r300_asic {
-	const unsigned	*reg_safe_bm;
-	unsigned	reg_safe_bm_size;
-	u32		resync_scratch;
-	u32		hdp_cntl;
+	const unsigned		*reg_safe_bm;
+	unsigned		reg_safe_bm_size;
+	u32			resync_scratch;
+	u32			hdp_cntl;
+	struct r100_gpu_lockup	lockup;
 };
 
 struct r600_asic {
-	unsigned max_pipes;
-	unsigned max_tile_pipes;
-	unsigned max_simds;
-	unsigned max_backends;
-	unsigned max_gprs;
-	unsigned max_threads;
-	unsigned max_stack_entries;
-	unsigned max_hw_contexts;
-	unsigned max_gs_threads;
-	unsigned sx_max_export_size;
-	unsigned sx_max_export_pos_size;
-	unsigned sx_max_export_smx_size;
-	unsigned sq_num_cf_insts;
-	unsigned tiling_nbanks;
-	unsigned tiling_npipes;
-	unsigned tiling_group_size;
+	unsigned		max_pipes;
+	unsigned		max_tile_pipes;
+	unsigned		max_simds;
+	unsigned		max_backends;
+	unsigned		max_gprs;
+	unsigned		max_threads;
+	unsigned		max_stack_entries;
+	unsigned		max_hw_contexts;
+	unsigned		max_gs_threads;
+	unsigned		sx_max_export_size;
+	unsigned		sx_max_export_pos_size;
+	unsigned		sx_max_export_smx_size;
+	unsigned		sq_num_cf_insts;
+	unsigned		tiling_nbanks;
+	unsigned		tiling_npipes;
+	unsigned		tiling_group_size;
+	struct r100_gpu_lockup	lockup;
 };
 
 struct rv770_asic {
+	unsigned		max_pipes;
+	unsigned		max_tile_pipes;
+	unsigned		max_simds;
+	unsigned		max_backends;
+	unsigned		max_gprs;
+	unsigned		max_threads;
+	unsigned		max_stack_entries;
+	unsigned		max_hw_contexts;
+	unsigned		max_gs_threads;
+	unsigned		sx_max_export_size;
+	unsigned		sx_max_export_pos_size;
+	unsigned		sx_max_export_smx_size;
+	unsigned		sq_num_cf_insts;
+	unsigned		sx_num_of_sets;
+	unsigned		sc_prim_fifo_size;
+	unsigned		sc_hiz_tile_fifo_size;
+	unsigned		sc_earlyz_tile_fifo_fize;
+	unsigned		tiling_nbanks;
+	unsigned		tiling_npipes;
+	unsigned		tiling_group_size;
+	struct r100_gpu_lockup	lockup;
+};
+
+struct evergreen_asic {
+	unsigned num_ses;
 	unsigned max_pipes;
 	unsigned max_tile_pipes;
 	unsigned max_simds;
@@ -853,7 +938,7 @@
 	unsigned sx_num_of_sets;
 	unsigned sc_prim_fifo_size;
 	unsigned sc_hiz_tile_fifo_size;
-	unsigned sc_earlyz_tile_fifo_fize;
+	unsigned sc_earlyz_tile_fifo_size;
 	unsigned tiling_nbanks;
 	unsigned tiling_npipes;
 	unsigned tiling_group_size;
@@ -864,6 +949,7 @@
 	struct r100_asic	r100;
 	struct r600_asic	r600;
 	struct rv770_asic	rv770;
+	struct evergreen_asic	evergreen;
 };
 
 /*
@@ -927,9 +1013,6 @@
 	bool				is_atom_bios;
 	uint16_t			bios_header_start;
 	struct radeon_bo		*stollen_vga_memory;
-	struct fb_info			*fbdev_info;
-	struct radeon_bo		*fbdev_rbo;
-	struct radeon_framebuffer	*fbdev_rfb;
 	/* Register mmio */
 	resource_size_t			rmmio_base;
 	resource_size_t			rmmio_size;
@@ -974,6 +1057,7 @@
 	struct work_struct hotplug_work;
 	int num_crtc; /* number of crtcs */
 	struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
+	struct mutex vram_mutex;
 
 	/* audio stuff */
 	struct timer_list	audio_timer;
@@ -984,6 +1068,7 @@
 	uint8_t			audio_category_code;
 
 	bool powered_down;
+	struct notifier_block acpi_nb;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -1145,7 +1230,8 @@
 #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
 #define radeon_cs_parse(p) rdev->asic->cs_parse((p))
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
+#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
+#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
 #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
 #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
@@ -1173,9 +1259,16 @@
 #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
 #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
 #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
+#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
+#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev))
+#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev))
+#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
+#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
+#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
 
 /* Common functions */
 /* AGP */
+extern int radeon_gpu_reset(struct radeon_device *rdev);
 extern void radeon_agp_disable(struct radeon_device *rdev);
 extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
 extern void radeon_gart_restore(struct radeon_device *rdev);
@@ -1200,6 +1293,8 @@
 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
 
 /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
+extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
+extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
 
 /* rv200,rv250,rv280 */
 extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1260,6 +1355,7 @@
 extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 extern bool r600_card_posted(struct radeon_device *rdev);
 extern void r600_cp_stop(struct radeon_device *rdev);
+extern int r600_cp_start(struct radeon_device *rdev);
 extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
 extern int r600_cp_resume(struct radeon_device *rdev);
 extern void r600_cp_fini(struct radeon_device *rdev);
@@ -1276,29 +1372,39 @@
 extern int r600_blit_init(struct radeon_device *rdev);
 extern void r600_blit_fini(struct radeon_device *rdev);
 extern int r600_init_microcode(struct radeon_device *rdev);
-extern int r600_gpu_reset(struct radeon_device *rdev);
+extern int r600_asic_reset(struct radeon_device *rdev);
 /* r600 irq */
 extern int r600_irq_init(struct radeon_device *rdev);
 extern void r600_irq_fini(struct radeon_device *rdev);
 extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
 extern int r600_irq_set(struct radeon_device *rdev);
 extern void r600_irq_suspend(struct radeon_device *rdev);
+extern void r600_disable_interrupts(struct radeon_device *rdev);
+extern void r600_rlc_stop(struct radeon_device *rdev);
 /* r600 audio */
 extern int r600_audio_init(struct radeon_device *rdev);
 extern int r600_audio_tmds_index(struct drm_encoder *encoder);
 extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+extern int r600_audio_channels(struct radeon_device *rdev);
+extern int r600_audio_bits_per_sample(struct radeon_device *rdev);
+extern int r600_audio_rate(struct radeon_device *rdev);
+extern uint8_t r600_audio_status_bits(struct radeon_device *rdev);
+extern uint8_t r600_audio_category_code(struct radeon_device *rdev);
+extern void r600_audio_schedule_polling(struct radeon_device *rdev);
+extern void r600_audio_enable_polling(struct drm_encoder *encoder);
+extern void r600_audio_disable_polling(struct drm_encoder *encoder);
 extern void r600_audio_fini(struct radeon_device *rdev);
 extern void r600_hdmi_init(struct drm_encoder *encoder);
 extern void r600_hdmi_enable(struct drm_encoder *encoder);
 extern void r600_hdmi_disable(struct drm_encoder *encoder);
 extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
 extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
-extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
-					    int channels,
-					    int rate,
-					    int bps,
-					    uint8_t status_bits,
-					    uint8_t category_code);
+extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+
+extern void r700_cp_stop(struct radeon_device *rdev);
+extern void r700_cp_fini(struct radeon_device *rdev);
+extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+extern int evergreen_irq_set(struct radeon_device *rdev);
 
 /* evergreen */
 struct evergreen_mc_save {
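
Side note on the radeon.h hunks above: the old single pm_state/pm_action pair is split into two coexisting schemes, a table of static profiles (indexed by the PM_PROFILE_*_IDX defines, with separate single-head and multi-head slots) and a "dynpm" state machine. The following standalone sketch only illustrates how a profile slot could be selected from the requested profile type and head count; the pick_profile_index() helper, the simplified structs, and the example table values are stand-ins, not the driver's actual code.

/* Illustrative sketch only: simplified stand-ins for the radeon.h structures
 * above; pick_profile_index() is a hypothetical helper, not a radeon function. */
#include <stdio.h>

enum radeon_pm_profile_type {
	PM_PROFILE_DEFAULT,
	PM_PROFILE_AUTO,
	PM_PROFILE_LOW,
	PM_PROFILE_HIGH,
};

#define PM_PROFILE_DEFAULT_IDX 0
#define PM_PROFILE_LOW_SH_IDX  1	/* low power, single head */
#define PM_PROFILE_HIGH_SH_IDX 2	/* high power, single head */
#define PM_PROFILE_LOW_MH_IDX  3	/* low power, multi head */
#define PM_PROFILE_HIGH_MH_IDX 4	/* high power, multi head */
#define PM_PROFILE_MAX         5

struct radeon_pm_profile {
	int dpms_off_ps_idx;	/* power state index used when heads are off */
	int dpms_on_ps_idx;	/* power state index used when a head is active */
	int dpms_off_cm_idx;	/* clock mode within that state */
	int dpms_on_cm_idx;
};

/* Choose a profile slot from the profile type and the number of active
 * CRTCs, mirroring the SH/MH split in the defines above. */
static int pick_profile_index(enum radeon_pm_profile_type profile,
			      int active_crtc_count)
{
	int multi = active_crtc_count > 1;

	switch (profile) {
	case PM_PROFILE_LOW:
		return multi ? PM_PROFILE_LOW_MH_IDX : PM_PROFILE_LOW_SH_IDX;
	case PM_PROFILE_HIGH:
		return multi ? PM_PROFILE_HIGH_MH_IDX : PM_PROFILE_HIGH_SH_IDX;
	case PM_PROFILE_DEFAULT:
	case PM_PROFILE_AUTO:	/* AUTO would pick from load/thermal data */
	default:
		return PM_PROFILE_DEFAULT_IDX;
	}
}

int main(void)
{
	/* Example table values are made up for the demonstration. */
	struct radeon_pm_profile profiles[PM_PROFILE_MAX] = {
		[PM_PROFILE_DEFAULT_IDX] = { 0, 0, 0, 0 },
		[PM_PROFILE_LOW_SH_IDX]  = { 0, 1, 0, 0 },
		[PM_PROFILE_HIGH_SH_IDX] = { 0, 2, 0, 0 },
		[PM_PROFILE_LOW_MH_IDX]  = { 0, 1, 0, 0 },
		[PM_PROFILE_HIGH_MH_IDX] = { 0, 2, 0, 0 },
	};
	int idx = pick_profile_index(PM_PROFILE_HIGH, 2);

	printf("profile slot %d -> power state %d\n",
	       idx, profiles[idx].dpms_on_ps_idx);
	return 0;
}
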
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a4b4bc9..e57df08 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -134,7 +134,8 @@
 	.suspend = &r100_suspend,
 	.resume = &r100_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r100_gpu_reset,
+	.gpu_is_lockup = &r100_gpu_is_lockup,
+	.asic_reset = &r100_asic_reset,
 	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
 	.gart_set_page = &r100_pci_gart_set_page,
 	.cp_commit = &r100_cp_commit,
@@ -164,6 +165,12 @@
 	.hpd_sense = &r100_hpd_sense,
 	.hpd_set_polarity = &r100_hpd_set_polarity,
 	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.pm_misc = &r100_pm_misc,
+	.pm_prepare = &r100_pm_prepare,
+	.pm_finish = &r100_pm_finish,
+	.pm_init_profile = &r100_pm_init_profile,
+	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic r200_asic = {
@@ -172,7 +179,8 @@
 	.suspend = &r100_suspend,
 	.resume = &r100_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r100_gpu_reset,
+	.gpu_is_lockup = &r100_gpu_is_lockup,
+	.asic_reset = &r100_asic_reset,
 	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
 	.gart_set_page = &r100_pci_gart_set_page,
 	.cp_commit = &r100_cp_commit,
@@ -201,6 +209,12 @@
 	.hpd_sense = &r100_hpd_sense,
 	.hpd_set_polarity = &r100_hpd_set_polarity,
 	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.pm_misc = &r100_pm_misc,
+	.pm_prepare = &r100_pm_prepare,
+	.pm_finish = &r100_pm_finish,
+	.pm_init_profile = &r100_pm_init_profile,
+	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic r300_asic = {
@@ -209,7 +223,8 @@
 	.suspend = &r300_suspend,
 	.resume = &r300_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r300_gpu_reset,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
+	.asic_reset = &r300_asic_reset,
 	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
 	.gart_set_page = &r100_pci_gart_set_page,
 	.cp_commit = &r100_cp_commit,
@@ -239,6 +254,12 @@
 	.hpd_sense = &r100_hpd_sense,
 	.hpd_set_polarity = &r100_hpd_set_polarity,
 	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.pm_misc = &r100_pm_misc,
+	.pm_prepare = &r100_pm_prepare,
+	.pm_finish = &r100_pm_finish,
+	.pm_init_profile = &r100_pm_init_profile,
+	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic r300_asic_pcie = {
@@ -247,7 +268,8 @@
 	.suspend = &r300_suspend,
 	.resume = &r300_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r300_gpu_reset,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
+	.asic_reset = &r300_asic_reset,
 	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
 	.gart_set_page = &rv370_pcie_gart_set_page,
 	.cp_commit = &r100_cp_commit,
@@ -276,6 +298,12 @@
 	.hpd_sense = &r100_hpd_sense,
 	.hpd_set_polarity = &r100_hpd_set_polarity,
 	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.pm_misc = &r100_pm_misc,
+	.pm_prepare = &r100_pm_prepare,
+	.pm_finish = &r100_pm_finish,
+	.pm_init_profile = &r100_pm_init_profile,
+	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic r420_asic = {
@@ -284,7 +312,8 @@
 	.suspend = &r420_suspend,
 	.resume = &r420_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r300_gpu_reset,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
+	.asic_reset = &r300_asic_reset,
 	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
 	.gart_set_page = &rv370_pcie_gart_set_page,
 	.cp_commit = &r100_cp_commit,
@@ -314,6 +343,12 @@
 	.hpd_sense = &r100_hpd_sense,
 	.hpd_set_polarity = &r100_hpd_set_polarity,
 	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.pm_misc = &r100_pm_misc,
+	.pm_prepare = &r100_pm_prepare,
+	.pm_finish = &r100_pm_finish,
+	.pm_init_profile = &r420_pm_init_profile,
+	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic rs400_asic = {
@@ -322,7 +357,8 @@
 	.suspend = &rs400_suspend,
 	.resume = &rs400_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r300_gpu_reset,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
+	.asic_reset = &r300_asic_reset,
 	.gart_tlb_flush = &rs400_gart_tlb_flush,
 	.gart_set_page = &rs400_gart_set_page,
 	.cp_commit = &r100_cp_commit,
@@ -352,6 +388,12 @@
 	.hpd_sense = &r100_hpd_sense,
 	.hpd_set_polarity = &r100_hpd_set_polarity,
 	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.pm_misc = &r100_pm_misc,
+	.pm_prepare = &r100_pm_prepare,
+	.pm_finish = &r100_pm_finish,
+	.pm_init_profile = &r100_pm_init_profile,
+	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic rs600_asic = {
@@ -360,7 +402,8 @@
 	.suspend = &rs600_suspend,
 	.resume = &rs600_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r300_gpu_reset,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
+	.asic_reset = &rs600_asic_reset,
 	.gart_tlb_flush = &rs600_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
 	.cp_commit = &r100_cp_commit,
@@ -390,6 +433,12 @@
 	.hpd_sense = &rs600_hpd_sense,
 	.hpd_set_polarity = &rs600_hpd_set_polarity,
 	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.pm_misc = &rs600_pm_misc,
+	.pm_prepare = &rs600_pm_prepare,
+	.pm_finish = &rs600_pm_finish,
+	.pm_init_profile = &r420_pm_init_profile,
+	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic rs690_asic = {
@@ -398,7 +447,8 @@
 	.suspend = &rs690_suspend,
 	.resume = &rs690_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r300_gpu_reset,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
+	.asic_reset = &rs600_asic_reset,
 	.gart_tlb_flush = &rs400_gart_tlb_flush,
 	.gart_set_page = &rs400_gart_set_page,
 	.cp_commit = &r100_cp_commit,
@@ -428,6 +478,12 @@
 	.hpd_sense = &rs600_hpd_sense,
 	.hpd_set_polarity = &rs600_hpd_set_polarity,
 	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.pm_misc = &rs600_pm_misc,
+	.pm_prepare = &rs600_pm_prepare,
+	.pm_finish = &rs600_pm_finish,
+	.pm_init_profile = &r420_pm_init_profile,
+	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic rv515_asic = {
@@ -436,7 +492,8 @@
 	.suspend = &rv515_suspend,
 	.resume = &rv515_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &rv515_gpu_reset,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
+	.asic_reset = &rs600_asic_reset,
 	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
 	.gart_set_page = &rv370_pcie_gart_set_page,
 	.cp_commit = &r100_cp_commit,
@@ -466,6 +523,12 @@
 	.hpd_sense = &rs600_hpd_sense,
 	.hpd_set_polarity = &rs600_hpd_set_polarity,
 	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.pm_misc = &rs600_pm_misc,
+	.pm_prepare = &rs600_pm_prepare,
+	.pm_finish = &rs600_pm_finish,
+	.pm_init_profile = &r420_pm_init_profile,
+	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic r520_asic = {
@@ -474,7 +537,8 @@
 	.suspend = &rv515_suspend,
 	.resume = &r520_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &rv515_gpu_reset,
+	.gpu_is_lockup = &r300_gpu_is_lockup,
+	.asic_reset = &rs600_asic_reset,
 	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
 	.gart_set_page = &rv370_pcie_gart_set_page,
 	.cp_commit = &r100_cp_commit,
@@ -504,6 +568,12 @@
 	.hpd_sense = &rs600_hpd_sense,
 	.hpd_set_polarity = &rs600_hpd_set_polarity,
 	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.pm_misc = &rs600_pm_misc,
+	.pm_prepare = &rs600_pm_prepare,
+	.pm_finish = &rs600_pm_finish,
+	.pm_init_profile = &r420_pm_init_profile,
+	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic r600_asic = {
@@ -513,7 +583,8 @@
 	.resume = &r600_resume,
 	.cp_commit = &r600_cp_commit,
 	.vga_set_state = &r600_vga_set_state,
-	.gpu_reset = &r600_gpu_reset,
+	.gpu_is_lockup = &r600_gpu_is_lockup,
+	.asic_reset = &r600_asic_reset,
 	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
 	.ring_test = &r600_ring_test,
@@ -541,6 +612,12 @@
 	.hpd_sense = &r600_hpd_sense,
 	.hpd_set_polarity = &r600_hpd_set_polarity,
 	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.pm_misc = &r600_pm_misc,
+	.pm_prepare = &rs600_pm_prepare,
+	.pm_finish = &rs600_pm_finish,
+	.pm_init_profile = &r600_pm_init_profile,
+	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
 };
 
 static struct radeon_asic rs780_asic = {
@@ -549,8 +626,9 @@
 	.suspend = &r600_suspend,
 	.resume = &r600_resume,
 	.cp_commit = &r600_cp_commit,
+	.gpu_is_lockup = &r600_gpu_is_lockup,
 	.vga_set_state = &r600_vga_set_state,
-	.gpu_reset = &r600_gpu_reset,
+	.asic_reset = &r600_asic_reset,
 	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
 	.ring_test = &r600_ring_test,
@@ -578,6 +656,12 @@
 	.hpd_sense = &r600_hpd_sense,
 	.hpd_set_polarity = &r600_hpd_set_polarity,
 	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.pm_misc = &r600_pm_misc,
+	.pm_prepare = &rs600_pm_prepare,
+	.pm_finish = &rs600_pm_finish,
+	.pm_init_profile = &rs780_pm_init_profile,
+	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
 };
 
 static struct radeon_asic rv770_asic = {
@@ -586,7 +670,8 @@
 	.suspend = &rv770_suspend,
 	.resume = &rv770_resume,
 	.cp_commit = &r600_cp_commit,
-	.gpu_reset = &rv770_gpu_reset,
+	.asic_reset = &r600_asic_reset,
+	.gpu_is_lockup = &r600_gpu_is_lockup,
 	.vga_set_state = &r600_vga_set_state,
 	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
@@ -615,6 +700,12 @@
 	.hpd_sense = &r600_hpd_sense,
 	.hpd_set_polarity = &r600_hpd_set_polarity,
 	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.pm_misc = &rv770_pm_misc,
+	.pm_prepare = &rs600_pm_prepare,
+	.pm_finish = &rs600_pm_finish,
+	.pm_init_profile = &r600_pm_init_profile,
+	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
 };
 
 static struct radeon_asic evergreen_asic = {
@@ -622,16 +713,17 @@
 	.fini = &evergreen_fini,
 	.suspend = &evergreen_suspend,
 	.resume = &evergreen_resume,
-	.cp_commit = NULL,
-	.gpu_reset = &evergreen_gpu_reset,
+	.cp_commit = &r600_cp_commit,
+	.gpu_is_lockup = &evergreen_gpu_is_lockup,
+	.asic_reset = &evergreen_asic_reset,
 	.vga_set_state = &r600_vga_set_state,
-	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
-	.ring_test = NULL,
-	.ring_ib_execute = NULL,
-	.irq_set = NULL,
-	.irq_process = NULL,
-	.get_vblank_counter = NULL,
+	.ring_test = &r600_ring_test,
+	.ring_ib_execute = &r600_ring_ib_execute,
+	.irq_set = &evergreen_irq_set,
+	.irq_process = &evergreen_irq_process,
+	.get_vblank_counter = &evergreen_get_vblank_counter,
 	.fence_ring_emit = NULL,
 	.cs_parse = NULL,
 	.copy_blit = NULL,
@@ -650,6 +742,12 @@
 	.hpd_fini = &evergreen_hpd_fini,
 	.hpd_sense = &evergreen_hpd_sense,
 	.hpd_set_polarity = &evergreen_hpd_set_polarity,
+	.gui_idle = &r600_gui_idle,
+	.pm_misc = &evergreen_pm_misc,
+	.pm_prepare = &evergreen_pm_prepare,
+	.pm_finish = &evergreen_pm_finish,
+	.pm_init_profile = &r600_pm_init_profile,
+	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
 };
 
 int radeon_asic_init(struct radeon_device *rdev)
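
Side note on the radeon_asic.c hunks above: every ASIC table replaces the single gpu_reset hook with a cheap gpu_is_lockup check plus a heavier asic_reset hook, and gains the new gui_idle/pm_* callbacks. The sketch below is only a minimal model of that function-pointer dispatch pattern; the types, the dummy callbacks, and the recovery policy are stand-ins, not the driver's real reset path.

/* Illustrative sketch only: a minimal model of the per-ASIC function-pointer
 * tables above, with simplified stand-in types. */
#include <stdbool.h>
#include <stdio.h>

struct device;	/* opaque stand-in for struct radeon_device */

struct asic_ops {
	bool (*gpu_is_lockup)(struct device *dev);	/* cheap hang check */
	int  (*asic_reset)(struct device *dev);		/* heavy recovery */
	void (*pm_misc)(struct device *dev);
};

static bool dummy_is_lockup(struct device *dev) { (void)dev; return false; }
static int  dummy_reset(struct device *dev)     { (void)dev; puts("reset"); return 0; }
static void dummy_pm_misc(struct device *dev)   { (void)dev; }

static const struct asic_ops r100_like_ops = {
	.gpu_is_lockup = dummy_is_lockup,
	.asic_reset    = dummy_reset,
	.pm_misc       = dummy_pm_misc,
};

/* Mirrors the split introduced above: only fall back to the expensive
 * asic_reset() hook when the cheap gpu_is_lockup() hook reports a hang. */
static int maybe_recover(struct device *dev, const struct asic_ops *ops)
{
	if (!ops->gpu_is_lockup(dev))
		return 0;
	return ops->asic_reset(dev);
}

int main(void)
{
	return maybe_recover(NULL, &r100_like_ops);
}
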
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index a0b8280..5c40a3d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -60,7 +60,8 @@
 uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
 void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void r100_vga_set_state(struct radeon_device *rdev, bool state);
-int r100_gpu_reset(struct radeon_device *rdev);
+bool r100_gpu_is_lockup(struct radeon_device *rdev);
+int r100_asic_reset(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
 int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@@ -110,8 +111,6 @@
 void r100_wb_disable(struct radeon_device *rdev);
 void r100_wb_fini(struct radeon_device *rdev);
 int r100_wb_init(struct radeon_device *rdev);
-void r100_hdp_reset(struct radeon_device *rdev);
-int r100_rb2d_reset(struct radeon_device *rdev);
 int r100_cp_reset(struct radeon_device *rdev);
 void r100_vga_render_disable(struct radeon_device *rdev);
 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
@@ -126,6 +125,13 @@
 			 unsigned idx);
 void r100_enable_bm(struct radeon_device *rdev);
 void r100_set_common_regs(struct radeon_device *rdev);
+void r100_bm_disable(struct radeon_device *rdev);
+extern bool r100_gui_idle(struct radeon_device *rdev);
+extern void r100_pm_misc(struct radeon_device *rdev);
+extern void r100_pm_prepare(struct radeon_device *rdev);
+extern void r100_pm_finish(struct radeon_device *rdev);
+extern void r100_pm_init_profile(struct radeon_device *rdev);
+extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
 
 /*
  * r200,rv250,rs300,rv280
@@ -134,7 +140,7 @@
 			uint64_t src_offset,
 			uint64_t dst_offset,
 			unsigned num_pages,
-			struct radeon_fence *fence);
+			 struct radeon_fence *fence);
 
 /*
  * r300,r350,rv350,rv380
@@ -143,7 +149,8 @@
 extern void r300_fini(struct radeon_device *rdev);
 extern int r300_suspend(struct radeon_device *rdev);
 extern int r300_resume(struct radeon_device *rdev);
-extern int r300_gpu_reset(struct radeon_device *rdev);
+extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
+extern int r300_asic_reset(struct radeon_device *rdev);
 extern void r300_ring_start(struct radeon_device *rdev);
 extern void r300_fence_ring_emit(struct radeon_device *rdev,
 				struct radeon_fence *fence);
@@ -162,6 +169,7 @@
 extern void r420_fini(struct radeon_device *rdev);
 extern int r420_suspend(struct radeon_device *rdev);
 extern int r420_resume(struct radeon_device *rdev);
+extern void r420_pm_init_profile(struct radeon_device *rdev);
 
 /*
  * rs400,rs480
@@ -178,6 +186,7 @@
 /*
  * rs600.
  */
+extern int rs600_asic_reset(struct radeon_device *rdev);
 extern int rs600_init(struct radeon_device *rdev);
 extern void rs600_fini(struct radeon_device *rdev);
 extern int rs600_suspend(struct radeon_device *rdev);
@@ -195,6 +204,9 @@
 bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void rs600_hpd_set_polarity(struct radeon_device *rdev,
 			    enum radeon_hpd_id hpd);
+extern void rs600_pm_misc(struct radeon_device *rdev);
+extern void rs600_pm_prepare(struct radeon_device *rdev);
+extern void rs600_pm_finish(struct radeon_device *rdev);
 
 /*
  * rs690,rs740
@@ -212,7 +224,6 @@
  */
 int rv515_init(struct radeon_device *rdev);
 void rv515_fini(struct radeon_device *rdev);
-int rv515_gpu_reset(struct radeon_device *rdev);
 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rv515_ring_start(struct radeon_device *rdev);
@@ -252,7 +263,8 @@
 		  struct radeon_fence *fence);
 int r600_irq_process(struct radeon_device *rdev);
 int r600_irq_set(struct radeon_device *rdev);
-int r600_gpu_reset(struct radeon_device *rdev);
+bool r600_gpu_is_lockup(struct radeon_device *rdev);
+int r600_asic_reset(struct radeon_device *rdev);
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
 			 uint32_t offset, uint32_t obj_size);
@@ -268,6 +280,11 @@
 void r600_hpd_set_polarity(struct radeon_device *rdev,
 			   enum radeon_hpd_id hpd);
 extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
+extern bool r600_gui_idle(struct radeon_device *rdev);
+extern void r600_pm_misc(struct radeon_device *rdev);
+extern void r600_pm_init_profile(struct radeon_device *rdev);
+extern void rs780_pm_init_profile(struct radeon_device *rdev);
+extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
 
 /*
  * rv770,rv730,rv710,rv740
@@ -276,20 +293,29 @@
 void rv770_fini(struct radeon_device *rdev);
 int rv770_suspend(struct radeon_device *rdev);
 int rv770_resume(struct radeon_device *rdev);
-int rv770_gpu_reset(struct radeon_device *rdev);
+extern void rv770_pm_misc(struct radeon_device *rdev);
 
 /*
  * evergreen
  */
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
 int evergreen_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 int evergreen_suspend(struct radeon_device *rdev);
 int evergreen_resume(struct radeon_device *rdev);
-int evergreen_gpu_reset(struct radeon_device *rdev);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
+int evergreen_asic_reset(struct radeon_device *rdev);
 void evergreen_bandwidth_update(struct radeon_device *rdev);
 void evergreen_hpd_init(struct radeon_device *rdev);
 void evergreen_hpd_fini(struct radeon_device *rdev);
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
 				enum radeon_hpd_id hpd);
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
+int evergreen_irq_set(struct radeon_device *rdev);
+int evergreen_irq_process(struct radeon_device *rdev);
+extern void evergreen_pm_misc(struct radeon_device *rdev);
+extern void evergreen_pm_prepare(struct radeon_device *rdev);
+extern void evergreen_pm_finish(struct radeon_device *rdev);
+
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 9916d825..6e733fd 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -530,6 +530,8 @@
 			}
 
 			/* look up gpio for ddc, hpd */
+			ddc_bus.valid = false;
+			hpd.hpd = RADEON_HPD_NONE;
 			if ((le16_to_cpu(path->usDeviceTag) &
 			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
 				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
@@ -547,7 +549,6 @@
 						ATOM_I2C_RECORD *i2c_record;
 						ATOM_HPD_INT_RECORD *hpd_record;
 						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
-						hpd.hpd = RADEON_HPD_NONE;
 
 						while (record->ucRecordType > 0
 						       && record->
@@ -585,13 +586,10 @@
 						break;
 					}
 				}
-			} else {
-				hpd.hpd = RADEON_HPD_NONE;
-				ddc_bus.valid = false;
 			}
 
 			/* needed for aux chan transactions */
-			ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0;
+			ddc_bus.hpd = hpd.hpd;
 
 			conn_id = le16_to_cpu(path->usConnObjectId);
 
@@ -1174,7 +1172,7 @@
 		lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
 			le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
 		lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
-			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
 		lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
 			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
 		lvds->panel_pwr_delay =
@@ -1442,26 +1440,30 @@
 
 static const char *thermal_controller_names[] = {
 	"NONE",
-	"LM63",
-	"ADM1032",
-	"ADM1030",
-	"MUA6649",
-	"LM64",
-	"F75375",
-	"ASC7512",
+	"lm63",
+	"adm1032",
+	"adm1030",
+	"max6649",
+	"lm64",
+	"f75375",
+	"asc7xxx",
 };
 
 static const char *pp_lib_thermal_controller_names[] = {
 	"NONE",
-	"LM63",
-	"ADM1032",
-	"ADM1030",
-	"MUA6649",
-	"LM64",
-	"F75375",
+	"lm63",
+	"adm1032",
+	"adm1030",
+	"max6649",
+	"lm64",
+	"f75375",
 	"RV6xx",
 	"RV770",
-	"ADT7473",
+	"adt7473",
+	"External GPIO",
+	"Evergreen",
+	"adt7473 with internal",
+
 };
 
 union power_info {
@@ -1485,7 +1487,7 @@
 	int state_index = 0, mode_index = 0;
 	struct radeon_i2c_bus_rec i2c_bus;
 
-	rdev->pm.default_power_state = NULL;
+	rdev->pm.default_power_state_index = -1;
 
 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
 				   &frev, &crev, &data_offset)) {
@@ -1498,10 +1500,19 @@
 					 power_info->info.ucOverdriveControllerAddress >> 1);
 				i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
 				rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+				if (rdev->pm.i2c_bus) {
+					struct i2c_board_info info = { };
+					const char *name = thermal_controller_names[power_info->info.
+										    ucOverdriveThermalController];
+					info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+					strlcpy(info.type, name, sizeof(info.type));
+					i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+				}
 			}
 			num_modes = power_info->info.ucNumOfPowerModeEntries;
 			if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
 				num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+			/* last mode is usually default, array is low to high */
 			for (i = 0; i < num_modes; i++) {
 				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
 				switch (frev) {
@@ -1515,13 +1526,7 @@
 					if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
 					    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
 						continue;
-					/* skip overclock modes for now */
-					if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
-					     rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
-					    (rdev->pm.power_state[state_index].clock_info[0].sclk >
-					     rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
-						continue;
-					rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+					rdev->pm.power_state[state_index].pcie_lanes =
 						power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
 					misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
 					if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
@@ -1542,6 +1547,8 @@
 						rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
 							power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
 					}
+					rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+					rdev->pm.power_state[state_index].misc = misc;
 					/* order matters! */
 					if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
 						rdev->pm.power_state[state_index].type =
@@ -1555,15 +1562,23 @@
 					if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
 						rdev->pm.power_state[state_index].type =
 							POWER_STATE_TYPE_BALANCED;
-					if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+					if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
 						rdev->pm.power_state[state_index].type =
 							POWER_STATE_TYPE_PERFORMANCE;
+						rdev->pm.power_state[state_index].flags &=
+							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+					}
 					if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
 						rdev->pm.power_state[state_index].type =
 							POWER_STATE_TYPE_DEFAULT;
-						rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+						rdev->pm.default_power_state_index = state_index;
 						rdev->pm.power_state[state_index].default_clock_mode =
 							&rdev->pm.power_state[state_index].clock_info[0];
+						rdev->pm.power_state[state_index].flags &=
+							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+					} else if (state_index == 0) {
+						rdev->pm.power_state[state_index].clock_info[0].flags |=
+							RADEON_PM_MODE_NO_DISPLAY;
 					}
 					state_index++;
 					break;
@@ -1577,13 +1592,7 @@
 					if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
 					    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
 						continue;
-					/* skip overclock modes for now */
-					if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
-					     rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
-					    (rdev->pm.power_state[state_index].clock_info[0].sclk >
-					     rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
-						continue;
-					rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+					rdev->pm.power_state[state_index].pcie_lanes =
 						power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
 					misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
 					misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
@@ -1605,6 +1614,9 @@
 						rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
 							power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
 					}
+					rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+					rdev->pm.power_state[state_index].misc = misc;
+					rdev->pm.power_state[state_index].misc2 = misc2;
 					/* order matters! */
 					if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
 						rdev->pm.power_state[state_index].type =
@@ -1618,18 +1630,29 @@
 					if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
 						rdev->pm.power_state[state_index].type =
 							POWER_STATE_TYPE_BALANCED;
-					if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+					if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
 						rdev->pm.power_state[state_index].type =
 							POWER_STATE_TYPE_PERFORMANCE;
+						rdev->pm.power_state[state_index].flags &=
+							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+					}
 					if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
 						rdev->pm.power_state[state_index].type =
 							POWER_STATE_TYPE_BALANCED;
+					if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT)
+						rdev->pm.power_state[state_index].flags &=
+							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
 					if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
 						rdev->pm.power_state[state_index].type =
 							POWER_STATE_TYPE_DEFAULT;
-						rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+						rdev->pm.default_power_state_index = state_index;
 						rdev->pm.power_state[state_index].default_clock_mode =
 							&rdev->pm.power_state[state_index].clock_info[0];
+						rdev->pm.power_state[state_index].flags &=
+							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+					} else if (state_index == 0) {
+						rdev->pm.power_state[state_index].clock_info[0].flags |=
+							RADEON_PM_MODE_NO_DISPLAY;
 					}
 					state_index++;
 					break;
@@ -1643,13 +1666,7 @@
 					if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
 					    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
 						continue;
-					/* skip overclock modes for now */
-					if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
-					     rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
-					    (rdev->pm.power_state[state_index].clock_info[0].sclk >
-					     rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
-						continue;
-					rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+					rdev->pm.power_state[state_index].pcie_lanes =
 						power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
 					misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
 					misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
@@ -1677,6 +1694,9 @@
 							power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
 						}
 					}
+					rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+					rdev->pm.power_state[state_index].misc = misc;
+					rdev->pm.power_state[state_index].misc2 = misc2;
 					/* order matters! */
 					if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
 						rdev->pm.power_state[state_index].type =
@@ -1690,42 +1710,76 @@
 					if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
 						rdev->pm.power_state[state_index].type =
 							POWER_STATE_TYPE_BALANCED;
-					if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+					if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
 						rdev->pm.power_state[state_index].type =
 							POWER_STATE_TYPE_PERFORMANCE;
+						rdev->pm.power_state[state_index].flags &=
+							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+					}
 					if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
 						rdev->pm.power_state[state_index].type =
 							POWER_STATE_TYPE_BALANCED;
 					if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
 						rdev->pm.power_state[state_index].type =
 							POWER_STATE_TYPE_DEFAULT;
-						rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+						rdev->pm.default_power_state_index = state_index;
 						rdev->pm.power_state[state_index].default_clock_mode =
 							&rdev->pm.power_state[state_index].clock_info[0];
+					} else if (state_index == 0) {
+						rdev->pm.power_state[state_index].clock_info[0].flags |=
+							RADEON_PM_MODE_NO_DISPLAY;
 					}
 					state_index++;
 					break;
 				}
 			}
-		} else if (frev == 4) {
+			/* last mode is usually default */
+			if (rdev->pm.default_power_state_index == -1) {
+				rdev->pm.power_state[state_index - 1].type =
+					POWER_STATE_TYPE_DEFAULT;
+				rdev->pm.default_power_state_index = state_index - 1;
+				rdev->pm.power_state[state_index - 1].default_clock_mode =
+					&rdev->pm.power_state[state_index - 1].clock_info[0];
+				rdev->pm.power_state[state_index - 1].flags &=
+					~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+				rdev->pm.power_state[state_index - 1].misc = 0;
+				rdev->pm.power_state[state_index - 1].misc2 = 0;
+			}
+		} else {
 			/* add the i2c bus for thermal/fan chip */
 			/* no support for internal controller yet */
-			if (power_info->info_4.sThermalController.ucType > 0) {
-				if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
-				    (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) {
+			ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
+			if (controller->ucType > 0) {
+				if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
+				    (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
+				    (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) {
 					DRM_INFO("Internal thermal controller %s fan control\n",
-						 (power_info->info_4.sThermalController.ucFanParameters &
+						 (controller->ucFanParameters &
 						  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+				} else if ((controller->ucType ==
+					    ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+					   (controller->ucType ==
+					    ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) {
+					DRM_INFO("Special thermal controller config\n");
 				} else {
 					DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
-						 pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType],
-						 power_info->info_4.sThermalController.ucI2cAddress >> 1,
-						 (power_info->info_4.sThermalController.ucFanParameters &
+						 pp_lib_thermal_controller_names[controller->ucType],
+						 controller->ucI2cAddress >> 1,
+						 (controller->ucFanParameters &
 						  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-					i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine);
+					i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
 					rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+					if (rdev->pm.i2c_bus) {
+						struct i2c_board_info info = { };
+						const char *name = pp_lib_thermal_controller_names[controller->ucType];
+						info.addr = controller->ucI2cAddress >> 1;
+						strlcpy(info.type, name, sizeof(info.type));
+						i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+					}
+
 				}
 			}
+			/* first mode is usually default, followed by low to high */
 			for (i = 0; i < power_info->info_4.ucNumStates; i++) {
 				mode_index = 0;
 				power_state = (struct _ATOM_PPLIB_STATE *)
@@ -1754,14 +1808,34 @@
 						/* skip invalid modes */
 						if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
 							continue;
-						/* skip overclock modes for now */
-						if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
-						    rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)
+						rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+							VOLTAGE_SW;
+						rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+							clock_info->usVDDC;
+						mode_index++;
+					} else if (ASIC_IS_DCE4(rdev)) {
+						struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
+							(struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *)
+							(mode_info->atom_context->bios +
+							 data_offset +
+							 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+							 (power_state->ucClockStateIndices[j] *
+							  power_info->info_4.ucClockInfoSize));
+						sclk = le16_to_cpu(clock_info->usEngineClockLow);
+						sclk |= clock_info->ucEngineClockHigh << 16;
+						mclk = le16_to_cpu(clock_info->usMemoryClockLow);
+						mclk |= clock_info->ucMemoryClockHigh << 16;
+						rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+						rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+						/* skip invalid modes */
+						if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+						    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
 							continue;
 						rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
 							VOLTAGE_SW;
 						rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
 							clock_info->usVDDC;
+						/* XXX usVDDCI */
 						mode_index++;
 					} else {
 						struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
@@ -1781,12 +1855,6 @@
 						if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
 						    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
 							continue;
-						/* skip overclock modes for now */
-						if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
-						     rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
-						    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
-						     rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
-							continue;
 						rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
 							VOLTAGE_SW;
 						rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
@@ -1798,7 +1866,9 @@
 				if (mode_index) {
 					misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
 					misc2 = le16_to_cpu(non_clock_info->usClassification);
-					rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+					rdev->pm.power_state[state_index].misc = misc;
+					rdev->pm.power_state[state_index].misc2 = misc2;
+					rdev->pm.power_state[state_index].pcie_lanes =
 						((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
 						ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
 					switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
@@ -1815,22 +1885,36 @@
 							POWER_STATE_TYPE_PERFORMANCE;
 						break;
 					}
+					rdev->pm.power_state[state_index].flags = 0;
+					if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+						rdev->pm.power_state[state_index].flags |=
+							RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
 					if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
 						rdev->pm.power_state[state_index].type =
 							POWER_STATE_TYPE_DEFAULT;
-						rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+						rdev->pm.default_power_state_index = state_index;
 						rdev->pm.power_state[state_index].default_clock_mode =
 							&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
 					}
 					state_index++;
 				}
 			}
+			/* if multiple clock modes, mark the lowest as no display */
+			for (i = 0; i < state_index; i++) {
+				if (rdev->pm.power_state[i].num_clock_modes > 1)
+					rdev->pm.power_state[i].clock_info[0].flags |=
+						RADEON_PM_MODE_NO_DISPLAY;
+			}
+			/* first mode is usually default */
+			if (rdev->pm.default_power_state_index == -1) {
+				rdev->pm.power_state[0].type =
+					POWER_STATE_TYPE_DEFAULT;
+				rdev->pm.default_power_state_index = 0;
+				rdev->pm.power_state[0].default_clock_mode =
+					&rdev->pm.power_state[0].clock_info[0];
+			}
 		}
 	} else {
-		/* XXX figure out some good default low power mode for cards w/out power tables */
-	}
-
-	if (rdev->pm.default_power_state == NULL) {
 		/* add the default mode */
 		rdev->pm.power_state[state_index].type =
 			POWER_STATE_TYPE_DEFAULT;
@@ -1840,18 +1924,16 @@
 		rdev->pm.power_state[state_index].default_clock_mode =
 			&rdev->pm.power_state[state_index].clock_info[0];
 		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
-		if (rdev->asic->get_pcie_lanes)
-			rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
-		else
-			rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
-		rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+		rdev->pm.power_state[state_index].pcie_lanes = 16;
+		rdev->pm.default_power_state_index = state_index;
+		rdev->pm.power_state[state_index].flags = 0;
 		state_index++;
 	}
+
 	rdev->pm.num_power_states = state_index;
 
-	rdev->pm.current_power_state = rdev->pm.default_power_state;
-	rdev->pm.current_clock_mode =
-		rdev->pm.default_power_state->default_clock_mode;
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
 }
 
 void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
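
Side note on the radeon_atombios.c hunks above: the current/requested/default power-state pointers become plain indices into rdev->pm.power_state[]. A hedged guess at the motivation is that an index stays meaningful if the array is copied, re-sorted, or clamped, while a pointer would have to be recomputed. A tiny standalone illustration with made-up names and values:

/* Illustrative sketch only: index-based versus pointer-based tracking of an
 * entry in an array that may be copied or reordered; all names are stand-ins. */
#include <stdio.h>

struct power_state { int sclk; };

int main(void)
{
	struct power_state states[3] = { { 100 }, { 200 }, { 300 } };
	int current_index = 2;			/* still valid after a copy or sort */
	struct power_state *current_ptr = &states[2];	/* would go stale if the
							 * array were moved */

	/* Clamping an index is a plain comparison; the pointer equivalent
	 * needs bounds arithmetic against the array itself. */
	if (current_index >= 3)
		current_index = 0;

	printf("%d %d\n", states[current_index].sclk, current_ptr->sclk);
	return 0;
}
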
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 8ad71f7..fbba938 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -85,12 +85,11 @@
 		pci_unmap_rom(rdev->pdev, bios);
 		return false;
 	}
-	rdev->bios = kmalloc(size, GFP_KERNEL);
+	rdev->bios = kmemdup(bios, size, GFP_KERNEL);
 	if (rdev->bios == NULL) {
 		pci_unmap_rom(rdev->pdev, bios);
 		return false;
 	}
-	memcpy(rdev->bios, bios, size);
 	pci_unmap_rom(rdev->pdev, bios);
 	return true;
 }
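
Side note on the radeon_bios.c hunk above: the kmalloc()+memcpy() pair is folded into the kernel's kmemdup() helper. The userspace sketch below only shows the equivalent allocate-then-copy pattern; memdup_sketch() and the sample data are stand-ins for illustration.

/* Illustrative sketch only: a userspace stand-in for the kernel's kmemdup(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup_sketch(const void *src, size_t len)
{
	void *dst = malloc(len);

	if (dst)
		memcpy(dst, src, len);
	return dst;
}

int main(void)
{
	const char rom[] = "fake video bios";
	char *copy = memdup_sketch(rom, sizeof(rom));

	if (!copy)
		return 1;
	printf("%s\n", copy);
	free(copy);
	return 0;
}
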
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 37db8ad..7b5e10d3 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -450,17 +450,17 @@
 {
 	int edid_info;
 	struct edid *edid;
+	unsigned char *raw;
 	edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
 	if (!edid_info)
 		return false;
 
-	edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
-		       GFP_KERNEL);
+	raw = rdev->bios + edid_info;
+	edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL);
 	if (edid == NULL)
 		return false;
 
-	memcpy((unsigned char *)edid,
-	       (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH);
+	memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1));
 
 	if (!drm_edid_is_valid(edid)) {
 		kfree(edid);
@@ -600,7 +600,7 @@
 	}
 	i2c.mm_i2c = false;
 	i2c.i2c_id = 0;
-	i2c.hpd_id = 0;
+	i2c.hpd = RADEON_HPD_NONE;
 
 	if (ddc_line)
 		i2c.valid = true;
@@ -1113,18 +1113,20 @@
 				break;
 
 			if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
-			    (RBIOS16(tmp + 2) ==
-			     lvds->native_mode.vdisplay)) {
-				lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8;
-				lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8;
-				lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) +
-							       RBIOS16(tmp + 21)) * 8;
+			    (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+				lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+					(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+				lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+					(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+				lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+					(RBIOS8(tmp + 23) * 8);
 
-				lvds->native_mode.vtotal = RBIOS16(tmp + 24);
-				lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff;
-				lvds->native_mode.vsync_end =
-					((RBIOS16(tmp + 28) & 0xf800) >> 11) +
-					(RBIOS16(tmp + 28) & 0x7ff);
+				lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+					(RBIOS16(tmp + 24) - RBIOS16(tmp + 26));
+				lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+					((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26));
+				lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+					((RBIOS16(tmp + 28) & 0xf800) >> 11);
 
 				lvds->native_mode.clock = RBIOS16(tmp + 9) * 10;
 				lvds->native_mode.flags = 0;
@@ -2196,7 +2198,7 @@
 						  ATOM_DEVICE_DFP1_SUPPORT);
 
 			ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
-			hpd.hpd = RADEON_HPD_NONE;
+			hpd.hpd = RADEON_HPD_1;
 			radeon_add_legacy_connector(dev,
 						    0,
 						    ATOM_DEVICE_CRT1_SUPPORT |
@@ -2366,7 +2368,7 @@
 	u8 rev, blocks, tmp;
 	int state_index = 0;
 
-	rdev->pm.default_power_state = NULL;
+	rdev->pm.default_power_state_index = -1;
 
 	if (rdev->flags & RADEON_IS_MOBILITY) {
 		offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
@@ -2380,17 +2382,13 @@
 			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
 			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
 				goto default_mode;
-			/* skip overclock modes for now */
-			if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
-			     rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
-			    (rdev->pm.power_state[state_index].clock_info[0].sclk >
-			     rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
-				goto default_mode;
 			rdev->pm.power_state[state_index].type =
 				POWER_STATE_TYPE_BATTERY;
 			misc = RBIOS16(offset + 0x5 + 0x0);
 			if (rev > 4)
 				misc2 = RBIOS16(offset + 0x5 + 0xe);
+			rdev->pm.power_state[state_index].misc = misc;
+			rdev->pm.power_state[state_index].misc2 = misc2;
 			if (misc & 0x4) {
 				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
 				if (misc & 0x8)
@@ -2437,8 +2435,9 @@
 			} else
 				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
 			if (rev > 6)
-				rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+				rdev->pm.power_state[state_index].pcie_lanes =
 					RBIOS8(offset + 0x5 + 0x10);
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
 			state_index++;
 		} else {
 			/* XXX figure out some good default low power mode for mobility cards w/out power tables */
@@ -2456,16 +2455,13 @@
 	rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
 	rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
 	rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
-	if (rdev->asic->get_pcie_lanes)
-		rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
-	else
-		rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
-	rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+	rdev->pm.power_state[state_index].pcie_lanes = 16;
+	rdev->pm.power_state[state_index].flags = 0;
+	rdev->pm.default_power_state_index = state_index;
 	rdev->pm.num_power_states = state_index + 1;
 
-	rdev->pm.current_power_state = rdev->pm.default_power_state;
-	rdev->pm.current_clock_mode =
-		rdev->pm.default_power_state->default_clock_mode;
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
 }
 
 void radeon_external_tmds_setup(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4559a53..0c7ccc6 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1041,7 +1041,6 @@
 	struct radeon_connector_atom_dig *radeon_dig_connector;
 	uint32_t subpixel_order = SubPixelNone;
 	bool shared_ddc = false;
-	int ret;
 
 	/* fixme - tv/cv/din */
 	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1076,9 +1075,7 @@
 	switch (connector_type) {
 	case DRM_MODE_CONNECTOR_VGA:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
 			if (!radeon_connector->ddc_bus)
@@ -1088,12 +1085,11 @@
 		drm_connector_attach_property(&radeon_connector->base,
 					      rdev->mode_info.load_detect_property,
 					      1);
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 		break;
 	case DRM_MODE_CONNECTOR_DVIA:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
 			if (!radeon_connector->ddc_bus)
@@ -1113,9 +1109,7 @@
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
 		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
 			if (!radeon_connector->ddc_bus)
@@ -1141,9 +1135,7 @@
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
 		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
 			if (!radeon_connector->ddc_bus)
@@ -1163,9 +1155,7 @@
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
 		drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			/* add DP i2c bus */
 			if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1191,9 +1181,7 @@
 	case DRM_MODE_CONNECTOR_9PinDIN:
 		if (radeon_tv == 1) {
 			drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-			ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
-			if (ret)
-				goto failed;
+			drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
 			radeon_connector->dac_load_detect = true;
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.load_detect_property,
@@ -1211,9 +1199,7 @@
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
 		drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
 			if (!radeon_connector->ddc_bus)
@@ -1226,6 +1212,12 @@
 		break;
 	}
 
+	if (hpd->hpd == RADEON_HPD_NONE) {
+		if (i2c_bus->valid)
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+	} else
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+
 	connector->display_info.subpixel_order = subpixel_order;
 	drm_sysfs_connector_add(connector);
 	return;
@@ -1250,7 +1242,6 @@
 	struct drm_connector *connector;
 	struct radeon_connector *radeon_connector;
 	uint32_t subpixel_order = SubPixelNone;
-	int ret;
 
 	/* fixme - tv/cv/din */
 	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1278,9 +1269,7 @@
 	switch (connector_type) {
 	case DRM_MODE_CONNECTOR_VGA:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
 			if (!radeon_connector->ddc_bus)
@@ -1290,12 +1279,11 @@
 		drm_connector_attach_property(&radeon_connector->base,
 					      rdev->mode_info.load_detect_property,
 					      1);
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 		break;
 	case DRM_MODE_CONNECTOR_DVIA:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
 			if (!radeon_connector->ddc_bus)
@@ -1309,9 +1297,7 @@
 	case DRM_MODE_CONNECTOR_DVII:
 	case DRM_MODE_CONNECTOR_DVID:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
 			if (!radeon_connector->ddc_bus)
@@ -1330,9 +1316,7 @@
 	case DRM_MODE_CONNECTOR_9PinDIN:
 		if (radeon_tv == 1) {
 			drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-			ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
-			if (ret)
-				goto failed;
+			drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
 			radeon_connector->dac_load_detect = true;
 			/* RS400,RC410,RS480 chipset seems to report a lot
 			 * of false positive on load detect, we haven't yet
@@ -1351,9 +1335,7 @@
 		break;
 	case DRM_MODE_CONNECTOR_LVDS:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
 			if (!radeon_connector->ddc_bus)
@@ -1366,6 +1348,11 @@
 		break;
 	}
 
+	if (hpd->hpd == RADEON_HPD_NONE) {
+		if (i2c_bus->valid)
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+	} else
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
 	connector->display_info.subpixel_order = subpixel_order;
 	drm_sysfs_connector_add(connector);
 	return;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index f9b0fe0..ae0fb73 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -220,10 +220,6 @@
 	int r;
 
 	mutex_lock(&rdev->cs_mutex);
-	if (rdev->gpu_lockup) {
-		mutex_unlock(&rdev->cs_mutex);
-		return -EINVAL;
-	}
 	/* initialize parser */
 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
 	parser.filp = filp;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7b629e3..a20b612 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -299,24 +299,24 @@
 		sclk = radeon_get_engine_clock(rdev);
 		mclk = rdev->clock.default_mclk;
 
-		a.full = rfixed_const(100);
-		rdev->pm.sclk.full = rfixed_const(sclk);
-		rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-		rdev->pm.mclk.full = rfixed_const(mclk);
-		rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+		a.full = dfixed_const(100);
+		rdev->pm.sclk.full = dfixed_const(sclk);
+		rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+		rdev->pm.mclk.full = dfixed_const(mclk);
+		rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
 
-		a.full = rfixed_const(16);
+		a.full = dfixed_const(16);
 		/* core_bandwidth = sclk(Mhz) * 16 */
-		rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
+		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
 	} else {
 		sclk = radeon_get_engine_clock(rdev);
 		mclk = radeon_get_memory_clock(rdev);
 
-		a.full = rfixed_const(100);
-		rdev->pm.sclk.full = rfixed_const(sclk);
-		rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-		rdev->pm.mclk.full = rfixed_const(mclk);
-		rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+		a.full = dfixed_const(100);
+		rdev->pm.sclk.full = dfixed_const(sclk);
+		rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+		rdev->pm.mclk.full = dfixed_const(mclk);
+		rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
 	}
 }
 
@@ -599,9 +599,11 @@
 		spin_lock_init(&rdev->ih.lock);
 	mutex_init(&rdev->gem.mutex);
 	mutex_init(&rdev->pm.mutex);
+	mutex_init(&rdev->vram_mutex);
 	rwlock_init(&rdev->fence_drv.lock);
 	INIT_LIST_HEAD(&rdev->gem.objects);
 	init_waitqueue_head(&rdev->irq.vblank_queue);
+	init_waitqueue_head(&rdev->irq.idle_queue);
 
 	/* setup workqueue */
 	rdev->wq = create_workqueue("radeon");
@@ -671,7 +673,7 @@
 		/* Acceleration not working on AGP card try again
 		 * with fallback to PCI or PCIE GART
 		 */
-		radeon_gpu_reset(rdev);
+		radeon_asic_reset(rdev);
 		radeon_fini(rdev);
 		radeon_agp_disable(rdev);
 		r = radeon_init(rdev);
@@ -691,6 +693,8 @@
 {
 	DRM_INFO("radeon: finishing device.\n");
 	rdev->shutdown = true;
+	/* evict vram memory */
+	radeon_bo_evict_vram(rdev);
 	radeon_fini(rdev);
 	destroy_workqueue(rdev->wq);
 	vga_switcheroo_unregister_client(rdev->pdev);
@@ -728,9 +732,10 @@
 			continue;
 		}
 		robj = rfb->obj->driver_private;
-		if (robj != rdev->fbdev_rbo) {
+		/* don't unpin kernel fb objects */
+		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
 			r = radeon_bo_reserve(robj, false);
-			if (unlikely(r == 0)) {
+			if (r == 0) {
 				radeon_bo_unpin(robj);
 				radeon_bo_unreserve(robj);
 			}
@@ -743,6 +748,7 @@
 
 	radeon_save_bios_scratch_regs(rdev);
 
+	radeon_pm_suspend(rdev);
 	radeon_suspend(rdev);
 	radeon_hpd_fini(rdev);
 	/* evict remaining vram memory */
@@ -755,7 +761,7 @@
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
 	acquire_console_sem();
-	fb_set_suspend(rdev->fbdev_info, 1);
+	radeon_fbdev_set_suspend(rdev, 1);
 	release_console_sem();
 	return 0;
 }
@@ -778,8 +784,9 @@
 	/* resume AGP if in use */
 	radeon_agp_resume(rdev);
 	radeon_resume(rdev);
+	radeon_pm_resume(rdev);
 	radeon_restore_bios_scratch_regs(rdev);
-	fb_set_suspend(rdev->fbdev_info, 0);
+	radeon_fbdev_set_suspend(rdev, 0);
 	release_console_sem();
 
 	/* reset hpd state */
@@ -789,6 +796,26 @@
 	return 0;
 }
 
+int radeon_gpu_reset(struct radeon_device *rdev)
+{
+	int r;
+
+	radeon_save_bios_scratch_regs(rdev);
+	radeon_suspend(rdev);
+
+	r = radeon_asic_reset(rdev);
+	if (!r) {
+		dev_info(rdev->dev, "GPU reset succeeded\n");
+		radeon_resume(rdev);
+		radeon_restore_bios_scratch_regs(rdev);
+		drm_helper_resume_force_mode(rdev->ddev);
+		return 0;
+	}
+	/* bad news, how do we tell userspace? */
+	dev_info(rdev->dev, "GPU reset failed\n");
+	return r;
+}
+
 
 /*
  * Debugfs
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bb1c122..1006549 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -633,37 +633,37 @@
 
 	vco_freq = freq * post_div;
 	/* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
-	a.full = rfixed_const(pll->reference_freq);
-	feedback_divider.full = rfixed_const(vco_freq);
-	feedback_divider.full = rfixed_div(feedback_divider, a);
-	a.full = rfixed_const(ref_div);
-	feedback_divider.full = rfixed_mul(feedback_divider, a);
+	a.full = dfixed_const(pll->reference_freq);
+	feedback_divider.full = dfixed_const(vco_freq);
+	feedback_divider.full = dfixed_div(feedback_divider, a);
+	a.full = dfixed_const(ref_div);
+	feedback_divider.full = dfixed_mul(feedback_divider, a);
 
 	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
 		/* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
-		a.full = rfixed_const(10);
-		feedback_divider.full = rfixed_mul(feedback_divider, a);
-		feedback_divider.full += rfixed_const_half(0);
-		feedback_divider.full = rfixed_floor(feedback_divider);
-		feedback_divider.full = rfixed_div(feedback_divider, a);
+		a.full = dfixed_const(10);
+		feedback_divider.full = dfixed_mul(feedback_divider, a);
+		feedback_divider.full += dfixed_const_half(0);
+		feedback_divider.full = dfixed_floor(feedback_divider);
+		feedback_divider.full = dfixed_div(feedback_divider, a);
 
 		/* *fb_div = floor(feedback_divider); */
-		a.full = rfixed_floor(feedback_divider);
-		*fb_div = rfixed_trunc(a);
+		a.full = dfixed_floor(feedback_divider);
+		*fb_div = dfixed_trunc(a);
 		/* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
-		a.full = rfixed_const(10);
-		b.full = rfixed_mul(feedback_divider, a);
+		a.full = dfixed_const(10);
+		b.full = dfixed_mul(feedback_divider, a);
 
-		feedback_divider.full = rfixed_floor(feedback_divider);
-		feedback_divider.full = rfixed_mul(feedback_divider, a);
+		feedback_divider.full = dfixed_floor(feedback_divider);
+		feedback_divider.full = dfixed_mul(feedback_divider, a);
 		feedback_divider.full = b.full - feedback_divider.full;
-		*fb_div_frac = rfixed_trunc(feedback_divider);
+		*fb_div_frac = dfixed_trunc(feedback_divider);
 	} else {
 		/* *fb_div = floor(feedback_divider + 0.5); */
-		feedback_divider.full += rfixed_const_half(0);
-		feedback_divider.full = rfixed_floor(feedback_divider);
+		feedback_divider.full += dfixed_const_half(0);
+		feedback_divider.full = dfixed_floor(feedback_divider);
 
-		*fb_div = rfixed_trunc(feedback_divider);
+		*fb_div = dfixed_trunc(feedback_divider);
 		*fb_div_frac = 0;
 	}
 
@@ -693,10 +693,10 @@
 		pll_out_max = pll->pll_out_max;
 	}
 
-	ffreq.full = rfixed_const(freq);
+	ffreq.full = dfixed_const(freq);
 	/* max_error = ffreq * 0.0025; */
-	a.full = rfixed_const(400);
-	max_error.full = rfixed_div(ffreq, a);
+	a.full = dfixed_const(400);
+	max_error.full = dfixed_div(ffreq, a);
 
 	for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
 		if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
@@ -707,9 +707,9 @@
 				continue;
 
 			/* pll_out = vco / post_div; */
-			a.full = rfixed_const(post_div);
-			pll_out.full = rfixed_const(vco);
-			pll_out.full = rfixed_div(pll_out, a);
+			a.full = dfixed_const(post_div);
+			pll_out.full = dfixed_const(vco);
+			pll_out.full = dfixed_div(pll_out, a);
 
 			if (pll_out.full >= ffreq.full) {
 				error.full = pll_out.full - ffreq.full;
@@ -831,10 +831,6 @@
 static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
 	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
-	struct drm_device *dev = fb->dev;
-
-	if (fb->fbdev)
-		radeonfb_remove(dev, fb);
 
 	if (radeon_fb->obj)
 		drm_gem_object_unreference_unlocked(radeon_fb->obj);
@@ -856,21 +852,15 @@
 	.create_handle = radeon_user_framebuffer_create_handle,
 };
 
-struct drm_framebuffer *
-radeon_framebuffer_create(struct drm_device *dev,
-			  struct drm_mode_fb_cmd *mode_cmd,
-			  struct drm_gem_object *obj)
+void
+radeon_framebuffer_init(struct drm_device *dev,
+			struct radeon_framebuffer *rfb,
+			struct drm_mode_fb_cmd *mode_cmd,
+			struct drm_gem_object *obj)
 {
-	struct radeon_framebuffer *radeon_fb;
-
-	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
-	if (radeon_fb == NULL) {
-		return NULL;
-	}
-	drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
-	drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
-	radeon_fb->obj = obj;
-	return &radeon_fb->base;
+	rfb->obj = obj;
+	drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+	drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
 }
 
 static struct drm_framebuffer *
@@ -879,6 +869,7 @@
 			       struct drm_mode_fb_cmd *mode_cmd)
 {
 	struct drm_gem_object *obj;
+	struct radeon_framebuffer *radeon_fb;
 
 	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
 	if (obj ==  NULL) {
@@ -886,12 +877,26 @@
 			"can't create framebuffer\n", mode_cmd->handle);
 		return NULL;
 	}
-	return radeon_framebuffer_create(dev, mode_cmd, obj);
+
+	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+	if (radeon_fb == NULL) {
+		return NULL;
+	}
+
+	radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+
+	return &radeon_fb->base;
+}
+
+static void radeon_output_poll_changed(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	radeon_fb_output_poll_changed(rdev);
 }
 
 static const struct drm_mode_config_funcs radeon_mode_funcs = {
 	.fb_create = radeon_user_framebuffer_create,
-	.fb_changed = radeonfb_probe,
+	.output_poll_changed = radeon_output_poll_changed
 };
 
 struct drm_prop_enum_list {
@@ -978,8 +983,11 @@
 		/* set display priority to high for r3xx, rv515 chips
 		 * this avoids flickering due to underflow to the
 		 * display controllers during heavy acceleration.
+		 * Don't force high on rs4xx igp chips as it seems to
+		 * affect the sound card.  See kernel bug 15982.
 		 */
-		if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515))
+		if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
+		    !(rdev->flags & RADEON_IS_IGP))
 			rdev->disp_priority = 2;
 		else
 			rdev->disp_priority = 0;
@@ -1031,15 +1039,24 @@
 	}
 	/* initialize hpd */
 	radeon_hpd_init(rdev);
-	drm_helper_initial_config(rdev->ddev);
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	radeon_fbdev_init(rdev);
+	drm_kms_helper_poll_init(rdev->ddev);
+
 	return 0;
 }
 
 void radeon_modeset_fini(struct radeon_device *rdev)
 {
+	radeon_fbdev_fini(rdev);
 	kfree(rdev->mode_info.bios_hardcoded_edid);
+	radeon_pm_fini(rdev);
 
 	if (rdev->mode_info.mode_config_initialized) {
+		drm_kms_helper_poll_fini(rdev->ddev);
 		radeon_hpd_fini(rdev);
 		drm_mode_config_cleanup(rdev->ddev);
 		rdev->mode_info.mode_config_initialized = false;
@@ -1089,15 +1106,15 @@
 	}
 	if (radeon_crtc->rmx_type != RMX_OFF) {
 		fixed20_12 a, b;
-		a.full = rfixed_const(crtc->mode.vdisplay);
-		b.full = rfixed_const(radeon_crtc->native_mode.hdisplay);
-		radeon_crtc->vsc.full = rfixed_div(a, b);
-		a.full = rfixed_const(crtc->mode.hdisplay);
-		b.full = rfixed_const(radeon_crtc->native_mode.vdisplay);
-		radeon_crtc->hsc.full = rfixed_div(a, b);
+		a.full = dfixed_const(crtc->mode.vdisplay);
+		b.full = dfixed_const(radeon_crtc->native_mode.hdisplay);
+		radeon_crtc->vsc.full = dfixed_div(a, b);
+		a.full = dfixed_const(crtc->mode.hdisplay);
+		b.full = dfixed_const(radeon_crtc->native_mode.vdisplay);
+		radeon_crtc->hsc.full = dfixed_div(a, b);
 	} else {
-		radeon_crtc->vsc.full = rfixed_const(1);
-		radeon_crtc->hsc.full = rfixed_const(1);
+		radeon_crtc->vsc.full = dfixed_const(1);
+		radeon_crtc->hsc.full = dfixed_const(1);
 	}
 	return true;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index b3749d4..902d173 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -44,9 +44,10 @@
  * - 2.1.0 - add square tiling interface
  * - 2.2.0 - add r6xx/r7xx const buffer support
  * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
+ * - 2.4.0 - add crtc id query
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	3
+#define KMS_DRIVER_MINOR	4
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -91,7 +92,6 @@
 int radeon_connector_table = 0;
 int radeon_tv = 1;
 int radeon_new_pll = -1;
-int radeon_dynpm = -1;
 int radeon_audio = 1;
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
@@ -132,9 +132,6 @@
 MODULE_PARM_DESC(new_pll, "Select new PLL code");
 module_param_named(new_pll, radeon_new_pll, int, 0444);
 
-MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)");
-module_param_named(dynpm, radeon_dynpm, int, 0444);
-
 MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
 module_param_named(audio, radeon_audio, int, 0444);
 
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index c5ddaf5..1ebb100 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -309,9 +309,6 @@
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 
-	/* adjust pm to upcoming mode change */
-	radeon_pm_compute_clocks(rdev);
-
 	/* set the active encoder to connector routing */
 	radeon_encoder_set_active_device(encoder);
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
@@ -1111,8 +1108,6 @@
 	}
 	radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
-	/* adjust pm to dpms change */
-	radeon_pm_compute_clocks(rdev);
 }
 
 union crtc_source_param {
@@ -1546,10 +1541,49 @@
 
 static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
 {
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig;
 	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		if (ASIC_IS_DCE4(rdev))
+			/* disable the transmitter */
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+		else {
+			/* disable the encoder and transmitter */
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+			atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
+		}
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+		atombios_ddia_setup(encoder, ATOM_DISABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		atombios_external_tmds_setup(encoder, ATOM_DISABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		atombios_dac_setup(encoder, ATOM_DISABLE);
+		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+			atombios_tv_setup(encoder, ATOM_DISABLE);
+		break;
+	}
+
 	if (radeon_encoder_is_digital(encoder)) {
 		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
 			r600_hdmi_disable(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 9ac57a0..e192acf 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -23,10 +23,6 @@
  * Authors:
  *     David Airlie
  */
-    /*
-     *  Modularization
-     */
-
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/fb.h>
@@ -42,17 +38,21 @@
 
 #include <linux/vga_switcheroo.h>
 
-struct radeon_fb_device {
+/* object hierarchy -
+   this contains a helper and a radeon fb;
+   the helper contains a pointer to the radeon framebuffer base class.
+*/
+struct radeon_fbdev {
 	struct drm_fb_helper helper;
-	struct radeon_framebuffer	*rfb;
-	struct radeon_device		*rdev;
+	struct radeon_framebuffer rfb;
+	struct list_head fbdev_list;
+	struct radeon_device *rdev;
 };
 
 static struct fb_ops radeonfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_setcolreg = drm_fb_helper_setcolreg,
 	.fb_fillrect = cfb_fillrect,
 	.fb_copyarea = cfb_copyarea,
 	.fb_imageblit = cfb_imageblit,
@@ -61,45 +61,6 @@
 	.fb_setcmap = drm_fb_helper_setcmap,
 };
 
-/**
- * Currently it is assumed that the old framebuffer is reused.
- *
- * LOCKING
- * caller should hold the mode config lock.
- *
- */
-int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
-{
-	struct fb_info *info;
-	struct drm_framebuffer *fb;
-	struct drm_display_mode *mode = crtc->desired_mode;
-
-	fb = crtc->fb;
-	if (fb == NULL) {
-		return 1;
-	}
-	info = fb->fbdev;
-	if (info == NULL) {
-		return 1;
-	}
-	if (mode == NULL) {
-		return 1;
-	}
-	info->var.xres = mode->hdisplay;
-	info->var.right_margin = mode->hsync_start - mode->hdisplay;
-	info->var.hsync_len = mode->hsync_end - mode->hsync_start;
-	info->var.left_margin = mode->htotal - mode->hsync_end;
-	info->var.yres = mode->vdisplay;
-	info->var.lower_margin = mode->vsync_start - mode->vdisplay;
-	info->var.vsync_len = mode->vsync_end - mode->vsync_start;
-	info->var.upper_margin = mode->vtotal - mode->vsync_end;
-	info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
-	/* avoid overflow */
-	info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-
-	return 0;
-}
-EXPORT_SYMBOL(radeonfb_resize);
 
 static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
 {
@@ -125,57 +86,44 @@
 	return aligned;
 }
 
-static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
-	.gamma_set = radeon_crtc_fb_gamma_set,
-	.gamma_get = radeon_crtc_fb_gamma_get,
-};
-
-int radeonfb_create(struct drm_device *dev,
-		    uint32_t fb_width, uint32_t fb_height,
-		    uint32_t surface_width, uint32_t surface_height,
-		    uint32_t surface_depth, uint32_t surface_bpp,
-		    struct drm_framebuffer **fb_p)
+static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
 {
-	struct radeon_device *rdev = dev->dev_private;
-	struct fb_info *info;
-	struct radeon_fb_device *rfbdev;
-	struct drm_framebuffer *fb = NULL;
-	struct radeon_framebuffer *rfb;
-	struct drm_mode_fb_cmd mode_cmd;
+	struct radeon_bo *rbo = gobj->driver_private;
+	int ret;
+
+	ret = radeon_bo_reserve(rbo, false);
+	if (likely(ret == 0)) {
+		radeon_bo_kunmap(rbo);
+		radeon_bo_unreserve(rbo);
+	}
+	drm_gem_object_unreference_unlocked(gobj);
+}
+
+static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+					 struct drm_mode_fb_cmd *mode_cmd,
+					 struct drm_gem_object **gobj_p)
+{
+	struct radeon_device *rdev = rfbdev->rdev;
 	struct drm_gem_object *gobj = NULL;
 	struct radeon_bo *rbo = NULL;
-	struct device *device = &rdev->pdev->dev;
-	int size, aligned_size, ret;
-	u64 fb_gpuaddr;
-	void *fbptr = NULL;
-	unsigned long tmp;
 	bool fb_tiled = false; /* useful for testing */
 	u32 tiling_flags = 0;
+	int ret;
+	int aligned_size, size;
 
-	mode_cmd.width = surface_width;
-	mode_cmd.height = surface_height;
-
-	/* avivo can't scanout real 24bpp */
-	if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
-		surface_bpp = 32;
-
-	mode_cmd.bpp = surface_bpp;
 	/* need to align pitch with crtc limits */
-	mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
-	mode_cmd.depth = surface_depth;
+	mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
 
-	size = mode_cmd.pitch * mode_cmd.height;
+	size = mode_cmd->pitch * mode_cmd->height;
 	aligned_size = ALIGN(size, PAGE_SIZE);
-
 	ret = radeon_gem_object_create(rdev, aligned_size, 0,
-			RADEON_GEM_DOMAIN_VRAM,
-			false, ttm_bo_type_kernel,
-			&gobj);
+				       RADEON_GEM_DOMAIN_VRAM,
+				       false, ttm_bo_type_kernel,
+				       &gobj);
 	if (ret) {
-		printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
-		       surface_width, surface_height);
-		ret = -ENOMEM;
-		goto out;
+		printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
+		       aligned_size);
+		return -ENOMEM;
 	}
 	rbo = gobj->driver_private;
 
@@ -183,7 +131,7 @@
 		tiling_flags = RADEON_TILING_MACRO;
 
 #ifdef __BIG_ENDIAN
-	switch (mode_cmd.bpp) {
+	switch (mode_cmd->bpp) {
 	case 32:
 		tiling_flags |= RADEON_TILING_SWAP_32BIT;
 		break;
@@ -196,57 +144,81 @@
 
 	if (tiling_flags) {
 		ret = radeon_bo_set_tiling_flags(rbo,
-					tiling_flags | RADEON_TILING_SURFACE,
-					mode_cmd.pitch);
+						 tiling_flags | RADEON_TILING_SURFACE,
+						 mode_cmd->pitch);
 		if (ret)
 			dev_err(rdev->dev, "FB failed to set tiling flags\n");
 	}
-	mutex_lock(&rdev->ddev->struct_mutex);
-	fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
-	if (fb == NULL) {
-		DRM_ERROR("failed to allocate fb.\n");
-		ret = -ENOMEM;
-		goto out_unref;
-	}
+
+
 	ret = radeon_bo_reserve(rbo, false);
 	if (unlikely(ret != 0))
 		goto out_unref;
-	ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+	ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL);
 	if (ret) {
 		radeon_bo_unreserve(rbo);
 		goto out_unref;
 	}
 	if (fb_tiled)
 		radeon_bo_check_tiling(rbo, 0, 0);
-	ret = radeon_bo_kmap(rbo, &fbptr);
+	ret = radeon_bo_kmap(rbo, NULL);
 	radeon_bo_unreserve(rbo);
 	if (ret) {
 		goto out_unref;
 	}
 
-	list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
+	*gobj_p = gobj;
+	return 0;
+out_unref:
+	radeonfb_destroy_pinned_object(gobj);
+	*gobj_p = NULL;
+	return ret;
+}
 
-	*fb_p = fb;
-	rfb = to_radeon_framebuffer(fb);
-	rdev->fbdev_rfb = rfb;
-	rdev->fbdev_rbo = rbo;
+static int radeonfb_create(struct radeon_fbdev *rfbdev,
+			   struct drm_fb_helper_surface_size *sizes)
+{
+	struct radeon_device *rdev = rfbdev->rdev;
+	struct fb_info *info;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_mode_fb_cmd mode_cmd;
+	struct drm_gem_object *gobj = NULL;
+	struct radeon_bo *rbo = NULL;
+	struct device *device = &rdev->pdev->dev;
+	int ret;
+	unsigned long tmp;
 
-	info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+
+	/* avivo can't scanout real 24bpp */
+	if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
+		sizes->surface_bpp = 32;
+
+	mode_cmd.bpp = sizes->surface_bpp;
+	mode_cmd.depth = sizes->surface_depth;
+
+	ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
+	rbo = gobj->driver_private;
+
+	/* okay, we have an object; now allocate the framebuffer */
+	info = framebuffer_alloc(0, device);
 	if (info == NULL) {
 		ret = -ENOMEM;
 		goto out_unref;
 	}
 
-	rdev->fbdev_info = info;
-	rfbdev = info->par;
-	rfbdev->helper.funcs = &radeon_fb_helper_funcs;
-	rfbdev->helper.dev = dev;
-	ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
-					    RADEONFB_CONN_LIMIT);
-	if (ret)
-		goto out_unref;
+	info->par = rfbdev;
 
-	memset_io(fbptr, 0x0, aligned_size);
+	radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+
+	fb = &rfbdev->rfb.base;
+
+	/* setup helper */
+	rfbdev->helper.fb = fb;
+	rfbdev->helper.fbdev = info;
+
+	memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
 
 	strcpy(info->fix.id, "radeondrmfb");
 
@@ -255,17 +227,22 @@
 	info->flags = FBINFO_DEFAULT;
 	info->fbops = &radeonfb_ops;
 
-	tmp = fb_gpuaddr - rdev->mc.vram_start;
+	tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
 	info->fix.smem_start = rdev->mc.aper_base + tmp;
-	info->fix.smem_len = size;
-	info->screen_base = fbptr;
-	info->screen_size = size;
+	info->fix.smem_len = radeon_bo_size(rbo);
+	info->screen_base = rbo->kptr;
+	info->screen_size = radeon_bo_size(rbo);
 
-	drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	/* setup aperture base/size for vesafb takeover */
-	info->aperture_base = rdev->ddev->mode_config.fb_base;
-	info->aperture_size = rdev->mc.real_vram_size;
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+	info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
+	info->apertures->ranges[0].size = rdev->mc.real_vram_size;
 
 	info->fix.mmio_start = 0;
 	info->fix.mmio_len = 0;
@@ -274,44 +251,55 @@
 	info->pixmap.access_align = 32;
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
 	info->pixmap.scan_align = 1;
+
 	if (info->screen_base == NULL) {
 		ret = -ENOSPC;
 		goto out_unref;
 	}
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+
 	DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
 	DRM_INFO("vram apper at 0x%lX\n",  (unsigned long)rdev->mc.aper_base);
-	DRM_INFO("size %lu\n", (unsigned long)size);
+	DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
 	DRM_INFO("fb depth is %d\n", fb->depth);
 	DRM_INFO("   pitch is %d\n", fb->pitch);
 
-	fb->fbdev = info;
-	rfbdev->rfb = rfb;
-	rfbdev->rdev = rdev;
-
-	mutex_unlock(&rdev->ddev->struct_mutex);
 	vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
 	return 0;
 
 out_unref:
 	if (rbo) {
-		ret = radeon_bo_reserve(rbo, false);
-		if (likely(ret == 0)) {
-			radeon_bo_kunmap(rbo);
-			radeon_bo_unreserve(rbo);
-		}
+
 	}
 	if (fb && ret) {
-		list_del(&fb->filp_head);
 		drm_gem_object_unreference(gobj);
 		drm_framebuffer_cleanup(fb);
 		kfree(fb);
 	}
-	drm_gem_object_unreference(gobj);
-	mutex_unlock(&rdev->ddev->struct_mutex);
-out:
 	return ret;
 }
 
+static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
+					   struct drm_fb_helper_surface_size *sizes)
+{
+	struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
+	int new_fb = 0;
+	int ret;
+
+	if (!helper->fb) {
+		ret = radeonfb_create(rfbdev, sizes);
+		if (ret)
+			return ret;
+		new_fb = 1;
+	}
+	return new_fb;
+}
+
 static char *mode_option;
 int radeon_parse_options(char *options)
 {
@@ -328,46 +316,102 @@
 	return 0;
 }
 
-int radeonfb_probe(struct drm_device *dev)
+void radeon_fb_output_poll_changed(struct radeon_device *rdev)
 {
-	struct radeon_device *rdev = dev->dev_private;
-	int bpp_sel = 32;
-
-	/* select 8 bpp console on RN50 or 16MB cards */
-	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
-		bpp_sel = 8;
-
-	return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
+	drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
 }
 
-int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
 {
 	struct fb_info *info;
-	struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
+	struct radeon_framebuffer *rfb = &rfbdev->rfb;
 	struct radeon_bo *rbo;
 	int r;
 
-	if (!fb) {
-		return -EINVAL;
-	}
-	info = fb->fbdev;
-	if (info) {
-		struct radeon_fb_device *rfbdev = info->par;
-		rbo = rfb->obj->driver_private;
+	if (rfbdev->helper.fbdev) {
+		info = rfbdev->helper.fbdev;
+
 		unregister_framebuffer(info);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+
+	if (rfb->obj) {
+		rbo = rfb->obj->driver_private;
 		r = radeon_bo_reserve(rbo, false);
 		if (likely(r == 0)) {
 			radeon_bo_kunmap(rbo);
 			radeon_bo_unpin(rbo);
 			radeon_bo_unreserve(rbo);
 		}
-		drm_fb_helper_free(&rfbdev->helper);
-		framebuffer_release(info);
+		drm_gem_object_unreference_unlocked(rfb->obj);
 	}
-
-	printk(KERN_INFO "unregistered panic notifier\n");
+	drm_fb_helper_fini(&rfbdev->helper);
+	drm_framebuffer_cleanup(&rfb->base);
 
 	return 0;
 }
-EXPORT_SYMBOL(radeonfb_remove);
-MODULE_LICENSE("GPL");
+
+static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+	.gamma_set = radeon_crtc_fb_gamma_set,
+	.gamma_get = radeon_crtc_fb_gamma_get,
+	.fb_probe = radeon_fb_find_or_create_single,
+};
+
+int radeon_fbdev_init(struct radeon_device *rdev)
+{
+	struct radeon_fbdev *rfbdev;
+	int bpp_sel = 32;
+
+	/* select 8 bpp console on RN50 or cards with 32MB or less VRAM */
+	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+		bpp_sel = 8;
+
+	rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
+	if (!rfbdev)
+		return -ENOMEM;
+
+	rfbdev->rdev = rdev;
+	rdev->mode_info.rfbdev = rfbdev;
+	rfbdev->helper.funcs = &radeon_fb_helper_funcs;
+
+	drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
+			   rdev->num_crtc,
+			   RADEONFB_CONN_LIMIT);
+	drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+	drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
+	return 0;
+}
+
+void radeon_fbdev_fini(struct radeon_device *rdev)
+{
+	if (!rdev->mode_info.rfbdev)
+		return;
+
+	radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
+	kfree(rdev->mode_info.rfbdev);
+	rdev->mode_info.rfbdev = NULL;
+}
+
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
+{
+	fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+}
+
+int radeon_fbdev_total_size(struct radeon_device *rdev)
+{
+	struct radeon_bo *robj;
+	int size = 0;
+
+	robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
+	size += radeon_bo_size(robj);
+	return size;
+}
+
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+{
+	if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
+		return true;
+	return false;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d90f95b..b1f9a81 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -58,7 +58,6 @@
 		radeon_fence_ring_emit(rdev, fence);
 
 	fence->emited = true;
-	fence->timeout = jiffies + ((2000 * HZ) / 1000);
 	list_del(&fence->list);
 	list_add_tail(&fence->list, &rdev->fence_drv.emited);
 	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
@@ -71,15 +70,34 @@
 	struct list_head *i, *n;
 	uint32_t seq;
 	bool wake = false;
+	unsigned long cjiffies;
 
-	if (rdev == NULL) {
-		return true;
-	}
-	if (rdev->shutdown) {
-		return true;
-	}
 	seq = RREG32(rdev->fence_drv.scratch_reg);
-	rdev->fence_drv.last_seq = seq;
+	if (seq != rdev->fence_drv.last_seq) {
+		rdev->fence_drv.last_seq = seq;
+		rdev->fence_drv.last_jiffies = jiffies;
+		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+	} else {
+		cjiffies = jiffies;
+		if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
+			cjiffies -= rdev->fence_drv.last_jiffies;
+			if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+				/* update the timeout */
+				rdev->fence_drv.last_timeout -= cjiffies;
+			} else {
+				/* the 500ms timeout has elapsed, we should test
+				 * for GPU lockup
+				 */
+				rdev->fence_drv.last_timeout = 1;
+			}
+		} else {
+			/* wrap around, update last jiffies; we will just wait
+			 * a little longer
+			 */
+			rdev->fence_drv.last_jiffies = cjiffies;
+		}
+		return false;
+	}
 	n = NULL;
 	list_for_each(i, &rdev->fence_drv.emited) {
 		fence = list_entry(i, struct radeon_fence, list);
@@ -171,9 +189,8 @@
 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 {
 	struct radeon_device *rdev;
-	unsigned long cur_jiffies;
-	unsigned long timeout;
-	bool expired = false;
+	unsigned long irq_flags, timeout;
+	u32 seq;
 	int r;
 
 	if (fence == NULL) {
@@ -184,21 +201,18 @@
 	if (radeon_fence_signaled(fence)) {
 		return 0;
 	}
-
+	timeout = rdev->fence_drv.last_timeout;
 retry:
-	cur_jiffies = jiffies;
-	timeout = HZ / 100;
-	if (time_after(fence->timeout, cur_jiffies)) {
-		timeout = fence->timeout - cur_jiffies;
-	}
-
+	/* save current sequence used to check for GPU lockup */
+	seq = rdev->fence_drv.last_seq;
 	if (intr) {
 		radeon_irq_kms_sw_irq_get(rdev);
 		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
 				radeon_fence_signaled(fence), timeout);
 		radeon_irq_kms_sw_irq_put(rdev);
-		if (unlikely(r < 0))
+		if (unlikely(r < 0)) {
 			return r;
+		}
 	} else {
 		radeon_irq_kms_sw_irq_get(rdev);
 		r = wait_event_timeout(rdev->fence_drv.queue,
@@ -206,38 +220,36 @@
 		radeon_irq_kms_sw_irq_put(rdev);
 	}
 	if (unlikely(!radeon_fence_signaled(fence))) {
-		if (unlikely(r == 0)) {
-			expired = true;
+		/* we were interrupted for some reason and the fence
+		 * isn't signaled yet, resume the wait
+		 */
+		if (r) {
+			timeout = r;
+			goto retry;
 		}
-		if (unlikely(expired)) {
-			timeout = 1;
-			if (time_after(cur_jiffies, fence->timeout)) {
-				timeout = cur_jiffies - fence->timeout;
-			}
-			timeout = jiffies_to_msecs(timeout);
-			if (timeout > 500) {
-				DRM_ERROR("fence(%p:0x%08X) %lums timeout "
-					  "going to reset GPU\n",
-					  fence, fence->seq, timeout);
-				radeon_gpu_reset(rdev);
-				WREG32(rdev->fence_drv.scratch_reg, fence->seq);
-			}
+		/* don't protect read access to rdev->fence_drv.last_seq
+		 * if we are experiencing a lockup the value doesn't change
+		 */
+		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+			/* good news we believe it's a lockup */
+			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
+			/* FIXME: what should we do? mark everyone
+			 * as signaled for now
+			 */
+			rdev->gpu_lockup = true;
+			r = radeon_gpu_reset(rdev);
+			if (r)
+				return r;
+			WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+			rdev->gpu_lockup = false;
 		}
+		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+		write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+		rdev->fence_drv.last_jiffies = jiffies;
+		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 		goto retry;
 	}
-	if (unlikely(expired)) {
-		rdev->fence_drv.count_timeout++;
-		cur_jiffies = jiffies;
-		timeout = 1;
-		if (time_after(cur_jiffies, fence->timeout)) {
-			timeout = cur_jiffies - fence->timeout;
-		}
-		timeout = jiffies_to_msecs(timeout);
-		DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
-			  fence, fence->seq, timeout);
-		DRM_ERROR("last signaled fence(0x%08X)\n",
-			  rdev->fence_drv.last_seq);
-	}
 	return 0;
 }
 
@@ -333,7 +345,6 @@
 	INIT_LIST_HEAD(&rdev->fence_drv.created);
 	INIT_LIST_HEAD(&rdev->fence_drv.emited);
 	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
-	rdev->fence_drv.count_timeout = 0;
 	init_waitqueue_head(&rdev->fence_drv.queue);
 	rdev->fence_drv.initialized = true;
 	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 1770d3c..e65b9031 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -173,7 +173,7 @@
 	int i, j;
 
 	if (!rdev->gart.ready) {
-		DRM_ERROR("trying to bind memory to unitialized GART !\n");
+		WARN(1, "trying to bind memory to uninitialized GART!\n");
 		return -EINVAL;
 	}
 	t = offset / RADEON_GPU_PAGE_SIZE;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ef92d14..a72a3ee 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -44,6 +44,9 @@
 	if (robj) {
 		radeon_bo_unref(&robj);
 	}
+
+	drm_gem_object_release(gobj);
+	kfree(gobj);
 }
 
 int radeon_gem_object_create(struct radeon_device *rdev, int size,
@@ -158,8 +161,7 @@
 	args->vram_visible = rdev->mc.real_vram_size;
 	if (rdev->stollen_vga_memory)
 		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
-	if (rdev->fbdev_rbo)
-		args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
+	args->vram_visible -= radeon_fbdev_total_size(rdev);
 	args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
 		RADEON_IB_POOL_SIZE*64*1024;
 	return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a212041..059bfa4 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -26,6 +26,7 @@
  *          Jerome Glisse
  */
 #include "drmP.h"
+#include "drm_crtc_helper.h"
 #include "radeon_drm.h"
 #include "radeon_reg.h"
 #include "radeon.h"
@@ -55,7 +56,7 @@
 			radeon_connector_hotplug(connector);
 	}
 	/* Just fire off a uevent and let userspace tell us what to do */
-	drm_sysfs_hotplug_event(dev);
+	drm_helper_hpd_irq_event(dev);
 }
 
 void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
@@ -67,6 +68,7 @@
 
 	/* Disable *all* interrupts */
 	rdev->irq.sw_int = false;
+	rdev->irq.gui_idle = false;
 	for (i = 0; i < rdev->num_crtc; i++)
 		rdev->irq.crtc_vblank_int[i] = false;
 	for (i = 0; i < 6; i++)
@@ -96,6 +98,7 @@
 	}
 	/* Disable *all* interrupts */
 	rdev->irq.sw_int = false;
+	rdev->irq.gui_idle = false;
 	for (i = 0; i < rdev->num_crtc; i++)
 		rdev->irq.crtc_vblank_int[i] = false;
 	for (i = 0; i < 6; i++)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index c633319..0406835 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -98,11 +98,15 @@
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_info *info;
+	struct radeon_mode_info *minfo = &rdev->mode_info;
 	uint32_t *value_ptr;
 	uint32_t value;
+	struct drm_crtc *crtc;
+	int i, found;
 
 	info = data;
 	value_ptr = (uint32_t *)((unsigned long)info->value);
+	value = *value_ptr;
 	switch (info->request) {
 	case RADEON_INFO_DEVICE_ID:
 		value = dev->pci_device;
@@ -116,6 +120,20 @@
 	case RADEON_INFO_ACCEL_WORKING:
 		value = rdev->accel_working;
 		break;
+	case RADEON_INFO_CRTC_FROM_ID:
+		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
+			crtc = (struct drm_crtc *)minfo->crtcs[i];
+			if (crtc && crtc->base.id == value) {
+				value = i;
+				found = 1;
+				break;
+			}
+		}
+		if (!found) {
+			DRM_DEBUG("unknown crtc id %d\n", value);
+			return -EINVAL;
+		}
+		break;
 	default:
 		DRM_DEBUG("Invalid request %d\n", info->request);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 88865e3..e1e5255 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -26,7 +26,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
-#include "radeon_fixed.h"
+#include <drm/drm_fixed.h>
 #include "radeon.h"
 #include "atom.h"
 
@@ -314,6 +314,9 @@
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
+		radeon_crtc->enabled = true;
+		/* adjust pm to dpms changes BEFORE enabling crtcs */
+		radeon_pm_compute_clocks(rdev);
 		if (radeon_crtc->crtc_id)
 			WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
 		else {
@@ -335,6 +338,9 @@
 										    RADEON_CRTC_DISP_REQ_EN_B));
 			WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
 		}
+		radeon_crtc->enabled = false;
+		/* adjust pm to dpms changes AFTER disabling crtcs */
+		radeon_pm_compute_clocks(rdev);
 		break;
 	}
 }
@@ -966,6 +972,12 @@
 				   struct drm_display_mode *mode,
 				   struct drm_display_mode *adjusted_mode)
 {
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* adjust pm to upcoming mode change */
+	radeon_pm_compute_clocks(rdev);
+
 	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
 		return false;
 	return true;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 0274abe..5a13b3e 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -116,8 +116,6 @@
 	else
 		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
-	/* adjust pm to dpms change */
-	radeon_pm_compute_clocks(rdev);
 }
 
 static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
@@ -217,11 +215,6 @@
 				     struct drm_display_mode *adjusted_mode)
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-
-	/* adjust pm to upcoming mode change */
-	radeon_pm_compute_clocks(rdev);
 
 	/* set the active encoder to connector routing */
 	radeon_encoder_set_active_device(encoder);
@@ -286,8 +279,6 @@
 	else
 		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
-	/* adjust pm to dpms change */
-	radeon_pm_compute_clocks(rdev);
 }
 
 static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
@@ -474,8 +465,6 @@
 	else
 		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
-	/* adjust pm to dpms change */
-	radeon_pm_compute_clocks(rdev);
 }
 
 static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
@@ -642,8 +631,6 @@
 	else
 		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
-	/* adjust pm to dpms change */
-	radeon_pm_compute_clocks(rdev);
 }
 
 static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
@@ -852,8 +839,6 @@
 	else
 		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
-	/* adjust pm to dpms change */
-	radeon_pm_compute_clocks(rdev);
 }
 
 static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5413fcd6..67358baf 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -34,11 +34,12 @@
 #include <drm_mode.h>
 #include <drm_edid.h>
 #include <drm_dp_helper.h>
+#include <drm_fixed.h>
 #include <linux/i2c.h>
 #include <linux/i2c-id.h>
 #include <linux/i2c-algo-bit.h>
-#include "radeon_fixed.h"
 
+struct radeon_bo;
 struct radeon_device;
 
 #define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
@@ -65,6 +66,16 @@
 	TV_STD_PAL_N,
 };
 
+enum radeon_hpd_id {
+	RADEON_HPD_1 = 0,
+	RADEON_HPD_2,
+	RADEON_HPD_3,
+	RADEON_HPD_4,
+	RADEON_HPD_5,
+	RADEON_HPD_6,
+	RADEON_HPD_NONE = 0xff,
+};
+
 /* radeon gpio-based i2c
  * 1. "mask" reg and bits
  *    grabs the gpio pins for software use
@@ -84,7 +95,7 @@
 	/* id used by atom */
 	uint8_t i2c_id;
 	/* id used by atom */
-	uint8_t hpd_id;
+	enum radeon_hpd_id hpd;
 	/* can be used with hw i2c engine */
 	bool hw_capable;
 	/* uses multi-media i2c engine */
@@ -202,6 +213,8 @@
 	DVO_SIL1178,
 };
 
+struct radeon_fbdev;
+
 struct radeon_mode_info {
 	struct atom_context *atom_context;
 	struct card_info *atom_card_info;
@@ -218,6 +231,9 @@
 	struct drm_property *tmds_pll_property;
 	/* hardcoded DFP edid from BIOS */
 	struct edid *bios_hardcoded_edid;
+
+	/* pointer to fbdev info structure */
+	struct radeon_fbdev *rfbdev;
 };
 
 #define MAX_H_CODE_TIMING_LEN 32
@@ -339,6 +355,7 @@
 	enum radeon_rmx_type rmx_type;
 	struct drm_display_mode native_mode;
 	void *enc_priv;
+	int audio_polling_active;
 	int hdmi_offset;
 	int hdmi_config_offset;
 	int hdmi_audio_workaround;
@@ -363,16 +380,6 @@
 	u32 mask;
 };
 
-enum radeon_hpd_id {
-	RADEON_HPD_NONE = 0,
-	RADEON_HPD_1,
-	RADEON_HPD_2,
-	RADEON_HPD_3,
-	RADEON_HPD_4,
-	RADEON_HPD_5,
-	RADEON_HPD_6,
-};
-
 struct radeon_hpd {
 	enum radeon_hpd_id hpd;
 	u8 plugged_state;
@@ -532,11 +539,10 @@
 				     u16 blue, int regno);
 extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 				     u16 *blue, int regno);
-struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
-						  struct drm_mode_fb_cmd *mode_cmd,
-						  struct drm_gem_object *obj);
-
-int radeonfb_probe(struct drm_device *dev);
+void radeon_framebuffer_init(struct drm_device *dev,
+			     struct radeon_framebuffer *rfb,
+			     struct drm_mode_fb_cmd *mode_cmd,
+			     struct drm_gem_object *obj);
 
 int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
 bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
@@ -575,4 +581,13 @@
 void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
 			       struct drm_display_mode *mode,
 			       struct drm_display_mode *adjusted_mode);
+
+/* fbdev layer */
+int radeon_fbdev_init(struct radeon_device *rdev);
+void radeon_fbdev_fini(struct radeon_device *rdev);
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
+int radeon_fbdev_total_size(struct radeon_device *rdev);
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
+
+void radeon_fb_output_poll_changed(struct radeon_device *rdev);
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1227747..d5b9373 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -112,9 +112,11 @@
 
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocation are uninterruptible */
+	mutex_lock(&rdev->vram_mutex);
 	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, 0, 0, !kernel, NULL, size,
 			&radeon_ttm_bo_destroy);
+	mutex_unlock(&rdev->vram_mutex);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS)
 			dev_err(rdev->dev,
@@ -166,11 +168,15 @@
 void radeon_bo_unref(struct radeon_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
+	struct radeon_device *rdev;
 
 	if ((*bo) == NULL)
 		return;
+	rdev = (*bo)->rdev;
 	tbo = &((*bo)->tbo);
+	mutex_lock(&rdev->vram_mutex);
 	ttm_bo_unref(&tbo);
+	mutex_unlock(&rdev->vram_mutex);
 	if (tbo == NULL)
 		*bo = NULL;
 }
@@ -192,7 +198,7 @@
 	}
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
@@ -216,7 +222,7 @@
 		return 0;
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
 	if (unlikely(r != 0))
 		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
 	return r;
@@ -295,6 +301,7 @@
 		r = radeon_bo_reserve(lobj->bo, false);
 		if (unlikely(r != 0))
 			return r;
+		lobj->reserved = true;
 	}
 	return 0;
 }
@@ -305,7 +312,7 @@
 
 	list_for_each_entry(lobj, head, list) {
 		/* only unreserve object we successfully reserved */
-		if (radeon_bo_is_reserved(lobj->bo))
+		if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
 			radeon_bo_unreserve(lobj->bo);
 	}
 }
@@ -316,6 +323,9 @@
 	struct radeon_bo *bo;
 	int r;
 
+	list_for_each_entry(lobj, head, list) {
+		lobj->reserved = false;
+	}
 	r = radeon_bo_list_reserve(head);
 	if (unlikely(r != 0)) {
 		return r;
@@ -331,7 +341,7 @@
 								lobj->rdomain);
 			}
 			r = ttm_bo_validate(&bo->tbo, &bo->placement,
-						true, false);
+						true, false, false);
 			if (unlikely(r))
 				return r;
 		}
@@ -499,11 +509,33 @@
 	radeon_bo_check_tiling(rbo, 0, 1);
 }
 
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
+	struct radeon_device *rdev;
 	struct radeon_bo *rbo;
+	unsigned long offset, size;
+	int r;
+
 	if (!radeon_ttm_bo_is_radeon_bo(bo))
-		return;
+		return 0;
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	radeon_bo_check_tiling(rbo, 0, 0);
+	rdev = rbo->rdev;
+	if (bo->mem.mem_type == TTM_PL_VRAM) {
+		size = bo->mem.num_pages << PAGE_SHIFT;
+		offset = bo->mem.mm_node->start << PAGE_SHIFT;
+		if ((offset + size) > rdev->mc.visible_vram_size) {
+			/* hurrah, the memory is not visible! */
+			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+			if (unlikely(r != 0))
+				return r;
+			offset = bo->mem.mm_node->start << PAGE_SHIFT;
+			/* this should not happen */
+			if ((offset + size) > rdev->mc.visible_vram_size)
+				return -EINVAL;
+		}
+	}
+	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 7ab43de..353998d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -168,6 +168,6 @@
 				bool force_drop);
 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 					struct ttm_mem_reg *mem);
-extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index a4b5749..a8d162c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -23,164 +23,122 @@
 #include "drmP.h"
 #include "radeon.h"
 #include "avivod.h"
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+#include <linux/power_supply.h>
 
 #define RADEON_IDLE_LOOP_MS 100
 #define RADEON_RECLOCK_DELAY_MS 200
 #define RADEON_WAIT_VBLANK_TIMEOUT 200
+#define RADEON_WAIT_IDLE_TIMEOUT 200
 
-static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
-static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
-static void radeon_pm_set_clocks(struct radeon_device *rdev);
-static void radeon_pm_idle_work_handler(struct work_struct *work);
+static void radeon_dynpm_idle_work_handler(struct work_struct *work);
 static int radeon_debugfs_pm_init(struct radeon_device *rdev);
+static bool radeon_pm_in_vbl(struct radeon_device *rdev);
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
+static void radeon_pm_update_profile(struct radeon_device *rdev);
+static void radeon_pm_set_clocks(struct radeon_device *rdev);
 
-static const char *pm_state_names[4] = {
-	"PM_STATE_DISABLED",
-	"PM_STATE_MINIMUM",
-	"PM_STATE_PAUSED",
-	"PM_STATE_ACTIVE"
-};
+#define ACPI_AC_CLASS           "ac_adapter"
 
-static const char *pm_state_types[5] = {
-	"Default",
-	"Powersave",
-	"Battery",
-	"Balanced",
-	"Performance",
-};
-
-static void radeon_print_power_mode_info(struct radeon_device *rdev)
+#ifdef CONFIG_ACPI
+static int radeon_acpi_event(struct notifier_block *nb,
+			     unsigned long val,
+			     void *data)
 {
-	int i, j;
-	bool is_default;
+	struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
+	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
 
-	DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
-	for (i = 0; i < rdev->pm.num_power_states; i++) {
-		if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
-			is_default = true;
+	if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
+		if (power_supply_is_system_supplied() > 0)
+			DRM_DEBUG("pm: AC\n");
 		else
-			is_default = false;
-		DRM_INFO("State %d %s %s\n", i,
-			 pm_state_types[rdev->pm.power_state[i].type],
-			 is_default ? "(default)" : "");
-		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
-			DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
-		DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
-		for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
-			if (rdev->flags & RADEON_IS_IGP)
-				DRM_INFO("\t\t%d engine: %d\n",
-					 j,
-					 rdev->pm.power_state[i].clock_info[j].sclk * 10);
+			DRM_DEBUG("pm: DC\n");
+
+		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+			if (rdev->pm.profile == PM_PROFILE_AUTO) {
+				mutex_lock(&rdev->pm.mutex);
+				radeon_pm_update_profile(rdev);
+				radeon_pm_set_clocks(rdev);
+				mutex_unlock(&rdev->pm.mutex);
+			}
+		}
+	}
+
+	return NOTIFY_OK;
+}
+#endif
+
+static void radeon_pm_update_profile(struct radeon_device *rdev)
+{
+	switch (rdev->pm.profile) {
+	case PM_PROFILE_DEFAULT:
+		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
+		break;
+	case PM_PROFILE_AUTO:
+		if (power_supply_is_system_supplied() > 0) {
+			if (rdev->pm.active_crtc_count > 1)
+				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
 			else
-				DRM_INFO("\t\t%d engine/memory: %d/%d\n",
-					 j,
-					 rdev->pm.power_state[i].clock_info[j].sclk * 10,
-					 rdev->pm.power_state[i].clock_info[j].mclk * 10);
-		}
-	}
-}
-
-static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
-							   enum radeon_pm_state_type type)
-{
-	int i, j;
-	enum radeon_pm_state_type wanted_types[2];
-	int wanted_count;
-
-	switch (type) {
-	case POWER_STATE_TYPE_DEFAULT:
-	default:
-		return rdev->pm.default_power_state;
-	case POWER_STATE_TYPE_POWERSAVE:
-		if (rdev->flags & RADEON_IS_MOBILITY) {
-			wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
-			wanted_types[1] = POWER_STATE_TYPE_BATTERY;
-			wanted_count = 2;
+				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
 		} else {
-			wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
-			wanted_count = 1;
+			if (rdev->pm.active_crtc_count > 1)
+				rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+			else
+				rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
 		}
 		break;
-	case POWER_STATE_TYPE_BATTERY:
-		if (rdev->flags & RADEON_IS_MOBILITY) {
-			wanted_types[0] = POWER_STATE_TYPE_BATTERY;
-			wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
-			wanted_count = 2;
-		} else {
-			wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
-			wanted_count = 1;
-		}
-		break;
-	case POWER_STATE_TYPE_BALANCED:
-	case POWER_STATE_TYPE_PERFORMANCE:
-		wanted_types[0] = type;
-		wanted_count = 1;
-		break;
-	}
-
-	for (i = 0; i < wanted_count; i++) {
-		for (j = 0; j < rdev->pm.num_power_states; j++) {
-			if (rdev->pm.power_state[j].type == wanted_types[i])
-				return &rdev->pm.power_state[j];
-		}
-	}
-
-	return rdev->pm.default_power_state;
-}
-
-static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
-							    struct radeon_power_state *power_state,
-							    enum radeon_pm_clock_mode_type type)
-{
-	switch (type) {
-	case POWER_MODE_TYPE_DEFAULT:
-	default:
-		return power_state->default_clock_mode;
-	case POWER_MODE_TYPE_LOW:
-		return &power_state->clock_info[0];
-	case POWER_MODE_TYPE_MID:
-		if (power_state->num_clock_modes > 2)
-			return &power_state->clock_info[1];
+	case PM_PROFILE_LOW:
+		if (rdev->pm.active_crtc_count > 1)
+			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
 		else
-			return &power_state->clock_info[0];
+			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
 		break;
-	case POWER_MODE_TYPE_HIGH:
-		return &power_state->clock_info[power_state->num_clock_modes - 1];
+	case PM_PROFILE_HIGH:
+		if (rdev->pm.active_crtc_count > 1)
+			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
+		else
+			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
+		break;
 	}
 
+	if (rdev->pm.active_crtc_count == 0) {
+		rdev->pm.requested_power_state_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
+		rdev->pm.requested_clock_mode_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
+	} else {
+		rdev->pm.requested_power_state_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
+		rdev->pm.requested_clock_mode_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
+	}
 }
 
-static void radeon_get_power_state(struct radeon_device *rdev,
-				   enum radeon_pm_action action)
+static void radeon_unmap_vram_bos(struct radeon_device *rdev)
 {
-	switch (action) {
-	case PM_ACTION_MINIMUM:
-		rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
-		rdev->pm.requested_clock_mode =
-			radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
-		break;
-	case PM_ACTION_DOWNCLOCK:
-		rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
-		rdev->pm.requested_clock_mode =
-			radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
-		break;
-	case PM_ACTION_UPCLOCK:
-		rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
-		rdev->pm.requested_clock_mode =
-			radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
-		break;
-	case PM_ACTION_NONE:
-	default:
-		DRM_ERROR("Requested mode for not defined action\n");
+	struct radeon_bo *bo, *n;
+
+	if (list_empty(&rdev->gem.objects))
 		return;
+
+	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+			ttm_bo_unmap_virtual(&bo->tbo);
 	}
-	DRM_INFO("Requested: e: %d m: %d p: %d\n",
-		 rdev->pm.requested_clock_mode->sclk,
-		 rdev->pm.requested_clock_mode->mclk,
-		 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
+
+	if (rdev->gart.table.vram.robj)
+		ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo);
+
+	if (rdev->stollen_vga_memory)
+		ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo);
+
+	if (rdev->r600_blit.shader_obj)
+		ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo);
 }
 
-static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
+static void radeon_sync_with_vblank(struct radeon_device *rdev)
 {
 	if (rdev->pm.active_crtcs) {
 		rdev->pm.vblank_sync = false;
@@ -192,73 +150,332 @@
 
 static void radeon_set_power_state(struct radeon_device *rdev)
 {
-	/* if *_clock_mode are the same, *_power_state are as well */
-	if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)
+	u32 sclk, mclk;
+
+	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
 		return;
 
-	DRM_INFO("Setting: e: %d m: %d p: %d\n",
-		 rdev->pm.requested_clock_mode->sclk,
-		 rdev->pm.requested_clock_mode->mclk,
-		 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
+	if (radeon_gui_idle(rdev)) {
+		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+			clock_info[rdev->pm.requested_clock_mode_index].sclk;
+		if (sclk > rdev->clock.default_sclk)
+			sclk = rdev->clock.default_sclk;
 
-	/* set pcie lanes */
-	/* TODO */
+		mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+			clock_info[rdev->pm.requested_clock_mode_index].mclk;
+		if (mclk > rdev->clock.default_mclk)
+			mclk = rdev->clock.default_mclk;
 
-	/* set voltage */
-	/* TODO */
+		/* voltage, pcie lanes, etc.*/
+		radeon_pm_misc(rdev);
 
-	/* set engine clock */
-	radeon_sync_with_vblank(rdev);
-	radeon_pm_debug_check_in_vbl(rdev, false);
-	radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
-	radeon_pm_debug_check_in_vbl(rdev, true);
+		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+			radeon_sync_with_vblank(rdev);
 
-#if 0
-	/* set memory clock */
-	if (rdev->asic->set_memory_clock) {
-		radeon_sync_with_vblank(rdev);
-		radeon_pm_debug_check_in_vbl(rdev, false);
-		radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk);
-		radeon_pm_debug_check_in_vbl(rdev, true);
+			if (!radeon_pm_in_vbl(rdev))
+				return;
+
+			radeon_pm_prepare(rdev);
+			/* set engine clock */
+			if (sclk != rdev->pm.current_sclk) {
+				radeon_pm_debug_check_in_vbl(rdev, false);
+				radeon_set_engine_clock(rdev, sclk);
+				radeon_pm_debug_check_in_vbl(rdev, true);
+				rdev->pm.current_sclk = sclk;
+				DRM_DEBUG("Setting: e: %d\n", sclk);
+			}
+
+			/* set memory clock */
+			if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+				radeon_pm_debug_check_in_vbl(rdev, false);
+				radeon_set_memory_clock(rdev, mclk);
+				radeon_pm_debug_check_in_vbl(rdev, true);
+				rdev->pm.current_mclk = mclk;
+				DRM_DEBUG("Setting: m: %d\n", mclk);
+			}
+			radeon_pm_finish(rdev);
+		} else {
+			/* set engine clock */
+			if (sclk != rdev->pm.current_sclk) {
+				radeon_sync_with_vblank(rdev);
+				radeon_pm_prepare(rdev);
+				radeon_set_engine_clock(rdev, sclk);
+				radeon_pm_finish(rdev);
+				rdev->pm.current_sclk = sclk;
+				DRM_DEBUG("Setting: e: %d\n", sclk);
+			}
+			/* set memory clock */
+			if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+				radeon_sync_with_vblank(rdev);
+				radeon_pm_prepare(rdev);
+				radeon_set_memory_clock(rdev, mclk);
+				radeon_pm_finish(rdev);
+				rdev->pm.current_mclk = mclk;
+				DRM_DEBUG("Setting: m: %d\n", mclk);
+			}
+		}
+
+		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
+		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
+	} else
+		DRM_DEBUG("pm: GUI not idle!!!\n");
+}
+
+static void radeon_pm_set_clocks(struct radeon_device *rdev)
+{
+	int i;
+
+	mutex_lock(&rdev->ddev->struct_mutex);
+	mutex_lock(&rdev->vram_mutex);
+	mutex_lock(&rdev->cp.mutex);
+
+	/* the gui idle interrupt seems to have issues on older chips */
+	if (rdev->family >= CHIP_R600) {
+		if (rdev->irq.installed) {
+			/* wait for GPU idle */
+			rdev->pm.gui_idle = false;
+			rdev->irq.gui_idle = true;
+			radeon_irq_set(rdev);
+			wait_event_interruptible_timeout(
+				rdev->irq.idle_queue, rdev->pm.gui_idle,
+				msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
+			rdev->irq.gui_idle = false;
+			radeon_irq_set(rdev);
+		}
+	} else {
+		if (rdev->cp.ready) {
+			struct radeon_fence *fence;
+			radeon_ring_alloc(rdev, 64);
+			radeon_fence_create(rdev, &fence);
+			radeon_fence_emit(rdev, fence);
+			radeon_ring_commit(rdev);
+			radeon_fence_wait(fence, false);
+			radeon_fence_unref(&fence);
+		}
 	}
-#endif
+	radeon_unmap_vram_bos(rdev);
 
-	rdev->pm.current_power_state = rdev->pm.requested_power_state;
-	rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
+	if (rdev->irq.installed) {
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (rdev->pm.active_crtcs & (1 << i)) {
+				rdev->pm.req_vblank |= (1 << i);
+				drm_vblank_get(rdev->ddev, i);
+			}
+		}
+	}
+
+	radeon_set_power_state(rdev);
+
+	if (rdev->irq.installed) {
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (rdev->pm.req_vblank & (1 << i)) {
+				rdev->pm.req_vblank &= ~(1 << i);
+				drm_vblank_put(rdev->ddev, i);
+			}
+		}
+	}
+
+	/* update display watermarks based on new power state */
+	radeon_update_bandwidth_info(rdev);
+	if (rdev->pm.active_crtc_count)
+		radeon_bandwidth_update(rdev);
+
+	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+
+	mutex_unlock(&rdev->cp.mutex);
+	mutex_unlock(&rdev->vram_mutex);
+	mutex_unlock(&rdev->ddev->struct_mutex);
+}
+
+static ssize_t radeon_get_pm_profile(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+	int cp = rdev->pm.profile;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			(cp == PM_PROFILE_AUTO) ? "auto" :
+			(cp == PM_PROFILE_LOW) ? "low" :
+			(cp == PM_PROFILE_HIGH) ? "high" : "default");
+}
+
+static ssize_t radeon_set_pm_profile(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf,
+				     size_t count)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+
+	mutex_lock(&rdev->pm.mutex);
+	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+		if (strncmp("default", buf, strlen("default")) == 0)
+			rdev->pm.profile = PM_PROFILE_DEFAULT;
+		else if (strncmp("auto", buf, strlen("auto")) == 0)
+			rdev->pm.profile = PM_PROFILE_AUTO;
+		else if (strncmp("low", buf, strlen("low")) == 0)
+			rdev->pm.profile = PM_PROFILE_LOW;
+		else if (strncmp("high", buf, strlen("high")) == 0)
+			rdev->pm.profile = PM_PROFILE_HIGH;
+		else {
+			DRM_ERROR("invalid power profile!\n");
+			goto fail;
+		}
+		radeon_pm_update_profile(rdev);
+		radeon_pm_set_clocks(rdev);
+	}
+fail:
+	mutex_unlock(&rdev->pm.mutex);
+
+	return count;
+}
+
+static ssize_t radeon_get_pm_method(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+	int pm = rdev->pm.pm_method;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			(pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
+}
+
+static ssize_t radeon_set_pm_method(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t count)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+
+
+	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
+		mutex_lock(&rdev->pm.mutex);
+		rdev->pm.pm_method = PM_METHOD_DYNPM;
+		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+		mutex_unlock(&rdev->pm.mutex);
+	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
+		mutex_lock(&rdev->pm.mutex);
+		rdev->pm.pm_method = PM_METHOD_PROFILE;
+		/* disable dynpm */
+		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+		cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+		mutex_unlock(&rdev->pm.mutex);
+	} else {
+		DRM_ERROR("invalid power method!\n");
+		goto fail;
+	}
+	radeon_pm_compute_clocks(rdev);
+fail:
+	return count;
+}
+
+static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
+static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
+
+void radeon_pm_suspend(struct radeon_device *rdev)
+{
+	mutex_lock(&rdev->pm.mutex);
+	cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+	rdev->pm.current_power_state_index = -1;
+	rdev->pm.current_clock_mode_index = -1;
+	rdev->pm.current_sclk = 0;
+	rdev->pm.current_mclk = 0;
+	mutex_unlock(&rdev->pm.mutex);
+}
+
+void radeon_pm_resume(struct radeon_device *rdev)
+{
+	radeon_pm_compute_clocks(rdev);
 }
 
 int radeon_pm_init(struct radeon_device *rdev)
 {
-	rdev->pm.state = PM_STATE_DISABLED;
-	rdev->pm.planned_action = PM_ACTION_NONE;
-	rdev->pm.downclocked = false;
+	int ret;
+	/* default to profile method */
+	rdev->pm.pm_method = PM_METHOD_PROFILE;
+	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+	rdev->pm.dynpm_can_upclock = true;
+	rdev->pm.dynpm_can_downclock = true;
+	rdev->pm.current_sclk = 0;
+	rdev->pm.current_mclk = 0;
 
 	if (rdev->bios) {
 		if (rdev->is_atom_bios)
 			radeon_atombios_get_power_modes(rdev);
 		else
 			radeon_combios_get_power_modes(rdev);
-		radeon_print_power_mode_info(rdev);
+		radeon_pm_init_profile(rdev);
+		rdev->pm.current_power_state_index = -1;
+		rdev->pm.current_clock_mode_index = -1;
 	}
 
-	if (radeon_debugfs_pm_init(rdev)) {
-		DRM_ERROR("Failed to register debugfs file for PM!\n");
+	if (rdev->pm.num_power_states > 1) {
+		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+			mutex_lock(&rdev->pm.mutex);
+			rdev->pm.profile = PM_PROFILE_DEFAULT;
+			radeon_pm_update_profile(rdev);
+			radeon_pm_set_clocks(rdev);
+			mutex_unlock(&rdev->pm.mutex);
+		}
+
+		/* where's the best place to put these? */
+		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+		if (ret)
+			DRM_ERROR("failed to create device file for power profile\n");
+		ret = device_create_file(rdev->dev, &dev_attr_power_method);
+		if (ret)
+			DRM_ERROR("failed to create device file for power method\n");
+
+#ifdef CONFIG_ACPI
+		rdev->acpi_nb.notifier_call = radeon_acpi_event;
+		register_acpi_notifier(&rdev->acpi_nb);
+#endif
+		INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
+
+		if (radeon_debugfs_pm_init(rdev)) {
+			DRM_ERROR("Failed to register debugfs file for PM!\n");
+		}
+
+		DRM_INFO("radeon: power management initialized\n");
 	}
 
-	INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
-
-	if (radeon_dynpm != -1 && radeon_dynpm) {
-		rdev->pm.state = PM_STATE_PAUSED;
-		DRM_INFO("radeon: dynamic power management enabled\n");
-	}
-
-	DRM_INFO("radeon: power management initialized\n");
-
 	return 0;
 }
 
 void radeon_pm_fini(struct radeon_device *rdev)
 {
+	if (rdev->pm.num_power_states > 1) {
+		mutex_lock(&rdev->pm.mutex);
+		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+			rdev->pm.profile = PM_PROFILE_DEFAULT;
+			radeon_pm_update_profile(rdev);
+			radeon_pm_set_clocks(rdev);
+		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+			/* cancel work */
+			cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+			/* reset default clocks */
+			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+			radeon_pm_set_clocks(rdev);
+		}
+		mutex_unlock(&rdev->pm.mutex);
+
+		device_remove_file(rdev->dev, &dev_attr_power_profile);
+		device_remove_file(rdev->dev, &dev_attr_power_method);
+#ifdef CONFIG_ACPI
+		unregister_acpi_notifier(&rdev->acpi_nb);
+#endif
+	}
+
 	if (rdev->pm.i2c_bus)
 		radeon_i2c_destroy(rdev->pm.i2c_bus);
 }
@@ -266,146 +483,167 @@
 void radeon_pm_compute_clocks(struct radeon_device *rdev)
 {
 	struct drm_device *ddev = rdev->ddev;
-	struct drm_connector *connector;
+	struct drm_crtc *crtc;
 	struct radeon_crtc *radeon_crtc;
-	int count = 0;
 
-	if (rdev->pm.state == PM_STATE_DISABLED)
+	if (rdev->pm.num_power_states < 2)
 		return;
 
 	mutex_lock(&rdev->pm.mutex);
 
 	rdev->pm.active_crtcs = 0;
-	list_for_each_entry(connector,
-		&ddev->mode_config.connector_list, head) {
-		if (connector->encoder &&
-		    connector->encoder->crtc &&
-		    connector->dpms != DRM_MODE_DPMS_OFF) {
-			radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
+	rdev->pm.active_crtc_count = 0;
+	list_for_each_entry(crtc,
+		&ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
 			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
-			++count;
+			rdev->pm.active_crtc_count++;
 		}
 	}
 
-	if (count > 1) {
-		if (rdev->pm.state == PM_STATE_ACTIVE) {
-			cancel_delayed_work(&rdev->pm.idle_work);
+	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+		radeon_pm_update_profile(rdev);
+		radeon_pm_set_clocks(rdev);
+	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
+			if (rdev->pm.active_crtc_count > 1) {
+				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
 
-			rdev->pm.state = PM_STATE_PAUSED;
-			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
-			if (rdev->pm.downclocked)
-				radeon_pm_set_clocks(rdev);
+					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+					radeon_pm_get_dynpm_state(rdev);
+					radeon_pm_set_clocks(rdev);
 
-			DRM_DEBUG("radeon: dynamic power management deactivated\n");
-		}
-	} else if (count == 1) {
-		/* TODO: Increase clocks if needed for current mode */
+					DRM_DEBUG("radeon: dynamic power management deactivated\n");
+				}
+			} else if (rdev->pm.active_crtc_count == 1) {
+				/* TODO: Increase clocks if needed for current mode */
 
-		if (rdev->pm.state == PM_STATE_MINIMUM) {
-			rdev->pm.state = PM_STATE_ACTIVE;
-			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
-			radeon_pm_set_clocks(rdev);
+				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
+					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
+					radeon_pm_get_dynpm_state(rdev);
+					radeon_pm_set_clocks(rdev);
 
-			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
-				msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
-		}
-		else if (rdev->pm.state == PM_STATE_PAUSED) {
-			rdev->pm.state = PM_STATE_ACTIVE;
-			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
-				msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
-			DRM_DEBUG("radeon: dynamic power management activated\n");
-		}
-	}
-	else { /* count == 0 */
-		if (rdev->pm.state != PM_STATE_MINIMUM) {
-			cancel_delayed_work(&rdev->pm.idle_work);
+					queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
+					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+					queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+					DRM_DEBUG("radeon: dynamic power management activated\n");
+				}
+			} else { /* count == 0 */
+				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
+					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
 
-			rdev->pm.state = PM_STATE_MINIMUM;
-			rdev->pm.planned_action = PM_ACTION_MINIMUM;
-			radeon_pm_set_clocks(rdev);
+					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
+					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
+					radeon_pm_get_dynpm_state(rdev);
+					radeon_pm_set_clocks(rdev);
+				}
+			}
 		}
 	}
 
 	mutex_unlock(&rdev->pm.mutex);
 }
 
-static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
+static bool radeon_pm_in_vbl(struct radeon_device *rdev)
 {
-	u32 stat_crtc1 = 0, stat_crtc2 = 0;
+	u32 stat_crtc = 0, vbl = 0, position = 0;
 	bool in_vbl = true;
 
-	if (ASIC_IS_AVIVO(rdev)) {
+	if (ASIC_IS_DCE4(rdev)) {
 		if (rdev->pm.active_crtcs & (1 << 0)) {
-			stat_crtc1 = RREG32(D1CRTC_STATUS);
-			if (!(stat_crtc1 & 1))
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
+		}
+		if (rdev->pm.active_crtcs & (1 << 1)) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
+		}
+		if (rdev->pm.active_crtcs & (1 << 2)) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
+		}
+		if (rdev->pm.active_crtcs & (1 << 3)) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
+		}
+		if (rdev->pm.active_crtcs & (1 << 4)) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
+		}
+		if (rdev->pm.active_crtcs & (1 << 5)) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
+		}
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		if (rdev->pm.active_crtcs & (1 << 0)) {
+			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
+			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
+		}
+		if (rdev->pm.active_crtcs & (1 << 1)) {
+			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
+			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
+		}
+		if (position < vbl && position > 1)
+			in_vbl = false;
+	} else {
+		if (rdev->pm.active_crtcs & (1 << 0)) {
+			stat_crtc = RREG32(RADEON_CRTC_STATUS);
+			if (!(stat_crtc & 1))
 				in_vbl = false;
 		}
 		if (rdev->pm.active_crtcs & (1 << 1)) {
-			stat_crtc2 = RREG32(D2CRTC_STATUS);
-			if (!(stat_crtc2 & 1))
+			stat_crtc = RREG32(RADEON_CRTC2_STATUS);
+			if (!(stat_crtc & 1))
 				in_vbl = false;
 		}
 	}
-	if (in_vbl == false)
-		DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
-			 stat_crtc2, finish ? "exit" : "entry");
+
+	if (position < vbl && position > 1)
+		in_vbl = false;
+
 	return in_vbl;
 }
-static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
-{
-	/*radeon_fence_wait_last(rdev);*/
-	switch (rdev->pm.planned_action) {
-	case PM_ACTION_UPCLOCK:
-		rdev->pm.downclocked = false;
-		break;
-	case PM_ACTION_DOWNCLOCK:
-		rdev->pm.downclocked = true;
-		break;
-	case PM_ACTION_MINIMUM:
-		break;
-	case PM_ACTION_NONE:
-		DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
-		break;
-	}
 
-	radeon_set_power_state(rdev);
-	rdev->pm.planned_action = PM_ACTION_NONE;
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
+{
+	u32 stat_crtc = 0;
+	bool in_vbl = radeon_pm_in_vbl(rdev);
+
+	if (in_vbl == false)
+		DRM_DEBUG("not in vbl for pm change %08x at %s\n", stat_crtc,
+			 finish ? "exit" : "entry");
+	return in_vbl;
 }
 
-static void radeon_pm_set_clocks(struct radeon_device *rdev)
-{
-	radeon_get_power_state(rdev, rdev->pm.planned_action);
-	mutex_lock(&rdev->cp.mutex);
-
-	if (rdev->pm.active_crtcs & (1 << 0)) {
-		rdev->pm.req_vblank |= (1 << 0);
-		drm_vblank_get(rdev->ddev, 0);
-	}
-	if (rdev->pm.active_crtcs & (1 << 1)) {
-		rdev->pm.req_vblank |= (1 << 1);
-		drm_vblank_get(rdev->ddev, 1);
-	}
-	radeon_pm_set_clocks_locked(rdev);
-	if (rdev->pm.req_vblank & (1 << 0)) {
-		rdev->pm.req_vblank &= ~(1 << 0);
-		drm_vblank_put(rdev->ddev, 0);
-	}
-	if (rdev->pm.req_vblank & (1 << 1)) {
-		rdev->pm.req_vblank &= ~(1 << 1);
-		drm_vblank_put(rdev->ddev, 1);
-	}
-
-	mutex_unlock(&rdev->cp.mutex);
-}
-
-static void radeon_pm_idle_work_handler(struct work_struct *work)
+static void radeon_dynpm_idle_work_handler(struct work_struct *work)
 {
 	struct radeon_device *rdev;
+	int resched;
 	rdev = container_of(work, struct radeon_device,
-				pm.idle_work.work);
+				pm.dynpm_idle_work.work);
 
+	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
 	mutex_lock(&rdev->pm.mutex);
-	if (rdev->pm.state == PM_STATE_ACTIVE) {
+	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
 		unsigned long irq_flags;
 		int not_processed = 0;
 
@@ -421,35 +659,40 @@
 		read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 
 		if (not_processed >= 3) { /* should upclock */
-			if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
-				rdev->pm.planned_action = PM_ACTION_NONE;
-			} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
-				rdev->pm.downclocked) {
-				rdev->pm.planned_action =
-					PM_ACTION_UPCLOCK;
-				rdev->pm.action_timeout = jiffies +
+			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
+				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+				   rdev->pm.dynpm_can_upclock) {
+				rdev->pm.dynpm_planned_action =
+					DYNPM_ACTION_UPCLOCK;
+				rdev->pm.dynpm_action_timeout = jiffies +
 				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
 			}
 		} else if (not_processed == 0) { /* should downclock */
-			if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
-				rdev->pm.planned_action = PM_ACTION_NONE;
-			} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
-				!rdev->pm.downclocked) {
-				rdev->pm.planned_action =
-					PM_ACTION_DOWNCLOCK;
-				rdev->pm.action_timeout = jiffies +
+			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
+				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+				   rdev->pm.dynpm_can_downclock) {
+				rdev->pm.dynpm_planned_action =
+					DYNPM_ACTION_DOWNCLOCK;
+				rdev->pm.dynpm_action_timeout = jiffies +
 				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
 			}
 		}
 
-		if (rdev->pm.planned_action != PM_ACTION_NONE &&
-		    jiffies > rdev->pm.action_timeout) {
+		/* Note, radeon_pm_set_clocks is called with static_switch set
+		 * to false since we want to wait for vbl to avoid flicker.
+		 */
+		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
+		    jiffies > rdev->pm.dynpm_action_timeout) {
+			radeon_pm_get_dynpm_state(rdev);
 			radeon_pm_set_clocks(rdev);
 		}
 	}
 	mutex_unlock(&rdev->pm.mutex);
+	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 
-	queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
+	queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
 					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
 }
 
@@ -464,7 +707,6 @@
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
 
-	seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
 	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
 	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
 	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index eabbc9c..c332f46 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -553,7 +553,6 @@
 #       define RADEON_CRTC_CRNT_VLINE_MASK  (0x7ff << 16)
 #define RADEON_CRTC2_CRNT_FRAME             0x0314
 #define RADEON_CRTC2_GUI_TRIG_VLINE         0x0318
-#define RADEON_CRTC2_STATUS                 0x03fc
 #define RADEON_CRTC2_VLINE_CRNT_VLINE       0x0310
 #define RADEON_CRTC8_DATA                   0x03d5 /* VGA, 0x3b5 */
 #define RADEON_CRTC8_IDX                    0x03d4 /* VGA, 0x3b4 */
@@ -995,6 +994,7 @@
 #	define RADEON_FP_DETECT_MASK		(1 << 4)
 #	define RADEON_CRTC2_VBLANK_MASK		(1 << 9)
 #	define RADEON_FP2_DETECT_MASK		(1 << 10)
+#	define RADEON_GUI_IDLE_MASK		(1 << 19)
 #	define RADEON_SW_INT_ENABLE		(1 << 25)
 #define RADEON_GEN_INT_STATUS               0x0044
 #	define AVIVO_DISPLAY_INT_STATUS		(1 << 0)
@@ -1006,6 +1006,8 @@
 #	define RADEON_CRTC2_VBLANK_STAT_ACK	(1 << 9)
 #	define RADEON_FP2_DETECT_STAT		(1 << 10)
 #	define RADEON_FP2_DETECT_STAT_ACK	(1 << 10)
+#	define RADEON_GUI_IDLE_STAT		(1 << 19)
+#	define RADEON_GUI_IDLE_STAT_ACK		(1 << 19)
 #	define RADEON_SW_INT_FIRE		(1 << 26)
 #	define RADEON_SW_INT_TEST		(1 << 25)
 #	define RADEON_SW_INT_TEST_ACK		(1 << 25)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index f6e1e8d..261e98a 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -219,24 +219,26 @@
 void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
 	int r;
+	struct radeon_bo *robj;
 
 	if (!rdev->ib_pool.ready) {
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
 	radeon_ib_bogus_cleanup(rdev);
-
-	if (rdev->ib_pool.robj) {
-		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
-		if (likely(r == 0)) {
-			radeon_bo_kunmap(rdev->ib_pool.robj);
-			radeon_bo_unpin(rdev->ib_pool.robj);
-			radeon_bo_unreserve(rdev->ib_pool.robj);
-		}
-		radeon_bo_unref(&rdev->ib_pool.robj);
-		rdev->ib_pool.robj = NULL;
-	}
+	robj = rdev->ib_pool.robj;
+	rdev->ib_pool.robj = NULL;
 	mutex_unlock(&rdev->ib_pool.mutex);
+
+	if (robj) {
+		r = radeon_bo_reserve(robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(robj);
+			radeon_bo_unpin(robj);
+			radeon_bo_unreserve(robj);
+		}
+		radeon_bo_unref(&robj);
+	}
 }
 
 
@@ -258,31 +260,41 @@
 	}
 }
 
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
 {
 	int r;
 
 	/* Align requested size with padding so unlock_commit can
 	 * pad safely */
 	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
-	mutex_lock(&rdev->cp.mutex);
 	while (ndw > (rdev->cp.ring_free_dw - 1)) {
 		radeon_ring_free_size(rdev);
 		if (ndw < rdev->cp.ring_free_dw) {
 			break;
 		}
 		r = radeon_fence_wait_next(rdev);
-		if (r) {
-			mutex_unlock(&rdev->cp.mutex);
+		if (r)
 			return r;
-		}
 	}
 	rdev->cp.count_dw = ndw;
 	rdev->cp.wptr_old = rdev->cp.wptr;
 	return 0;
 }
 
-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+{
+	int r;
+
+	mutex_lock(&rdev->cp.mutex);
+	r = radeon_ring_alloc(rdev, ndw);
+	if (r) {
+		mutex_unlock(&rdev->cp.mutex);
+		return r;
+	}
+	return 0;
+}
+
+void radeon_ring_commit(struct radeon_device *rdev)
 {
 	unsigned count_dw_pad;
 	unsigned i;
@@ -295,6 +307,11 @@
 	}
 	DRM_MEMORYBARRIER();
 	radeon_cp_commit(rdev);
+}
+
+void radeon_ring_unlock_commit(struct radeon_device *rdev)
+{
+	radeon_ring_commit(rdev);
 	mutex_unlock(&rdev->cp.mutex);
 }
 
@@ -344,20 +361,23 @@
 void radeon_ring_fini(struct radeon_device *rdev)
 {
 	int r;
+	struct radeon_bo *ring_obj;
 
 	mutex_lock(&rdev->cp.mutex);
-	if (rdev->cp.ring_obj) {
-		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
-		if (likely(r == 0)) {
-			radeon_bo_kunmap(rdev->cp.ring_obj);
-			radeon_bo_unpin(rdev->cp.ring_obj);
-			radeon_bo_unreserve(rdev->cp.ring_obj);
-		}
-		radeon_bo_unref(&rdev->cp.ring_obj);
-		rdev->cp.ring = NULL;
-		rdev->cp.ring_obj = NULL;
-	}
+	ring_obj = rdev->cp.ring_obj;
+	rdev->cp.ring = NULL;
+	rdev->cp.ring_obj = NULL;
 	mutex_unlock(&rdev->cp.mutex);
+
+	if (ring_obj) {
+		r = radeon_bo_reserve(ring_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(ring_obj);
+			radeon_bo_unpin(ring_obj);
+			radeon_bo_unreserve(ring_obj);
+		}
+		radeon_bo_unref(&ring_obj);
+	}
 }
 
 
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d031b68..e9918d8 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -33,6 +33,7 @@
 #include <ttm/ttm_bo_driver.h>
 #include <ttm/ttm_placement.h>
 #include <ttm/ttm_module.h>
+#include <ttm/ttm_page_alloc.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include <linux/seq_file.h>
@@ -162,34 +163,21 @@
 					  (unsigned)type);
 				return -EINVAL;
 			}
-			man->io_offset = rdev->mc.agp_base;
-			man->io_size = rdev->mc.gtt_size;
-			man->io_addr = NULL;
 			if (!rdev->ddev->agp->cant_use_aperture)
-				man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
-					     TTM_MEMTYPE_FLAG_MAPPABLE;
+				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 			man->available_caching = TTM_PL_FLAG_UNCACHED |
 						 TTM_PL_FLAG_WC;
 			man->default_caching = TTM_PL_FLAG_WC;
-		} else
-#endif
-		{
-			man->io_offset = 0;
-			man->io_size = 0;
-			man->io_addr = NULL;
 		}
+#endif
 		break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
 		man->gpu_offset = rdev->mc.vram_start;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
-			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-		man->io_addr = NULL;
-		man->io_offset = rdev->mc.aper_base;
-		man->io_size = rdev->mc.aper_size;
 		break;
 	default:
 		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -244,9 +232,9 @@
 }
 
 static int radeon_move_blit(struct ttm_buffer_object *bo,
-			    bool evict, int no_wait,
-			    struct ttm_mem_reg *new_mem,
-			    struct ttm_mem_reg *old_mem)
+			bool evict, int no_wait_reserve, bool no_wait_gpu,
+			struct ttm_mem_reg *new_mem,
+			struct ttm_mem_reg *old_mem)
 {
 	struct radeon_device *rdev;
 	uint64_t old_start, new_start;
@@ -290,13 +278,14 @@
 	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
 	/* FIXME: handle copy error */
 	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
-				      evict, no_wait, new_mem);
+				      evict, no_wait_reserve, no_wait_gpu, new_mem);
 	radeon_fence_unref(&fence);
 	return r;
 }
 
 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible, bool no_wait,
+				bool evict, bool interruptible,
+				bool no_wait_reserve, bool no_wait_gpu,
 				struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -317,7 +306,7 @@
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-			     interruptible, no_wait);
+			     interruptible, no_wait_reserve, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -331,11 +320,11 @@
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out_cleanup:
 	if (tmp_mem.mm_node) {
 		struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -349,7 +338,8 @@
 }
 
 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible, bool no_wait,
+				bool evict, bool interruptible,
+				bool no_wait_reserve, bool no_wait_gpu,
 				struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -369,15 +359,15 @@
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -394,8 +384,9 @@
 }
 
 static int radeon_bo_move(struct ttm_buffer_object *bo,
-			  bool evict, bool interruptible, bool no_wait,
-			  struct ttm_mem_reg *new_mem)
+			bool evict, bool interruptible,
+			bool no_wait_reserve, bool no_wait_gpu,
+			struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
@@ -422,23 +413,66 @@
 	if (old_mem->mem_type == TTM_PL_VRAM &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) {
 		r = radeon_move_vram_ram(bo, evict, interruptible,
-					    no_wait, new_mem);
+					no_wait_reserve, no_wait_gpu, new_mem);
 	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
 		   new_mem->mem_type == TTM_PL_VRAM) {
 		r = radeon_move_ram_vram(bo, evict, interruptible,
-					    no_wait, new_mem);
+					    no_wait_reserve, no_wait_gpu, new_mem);
 	} else {
-		r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+		r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
 	}
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+		r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 	}
-
 	return r;
 }
 
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_TT:
+#if __OS_HAS_AGP
+		if (rdev->flags & RADEON_IS_AGP) {
+			/* RADEON_IS_AGP is set only if AGP is active */
+			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+			mem->bus.base = rdev->mc.agp_base;
+			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
+		}
+#endif
+		break;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		/* check if it's visible */
+		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+			return -EINVAL;
+		mem->bus.base = rdev->mc.aper_base;
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
 static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
 				bool lazy, bool interruptible)
 {
@@ -479,6 +513,8 @@
 	.sync_obj_ref = &radeon_sync_obj_ref,
 	.move_notify = &radeon_bo_move_notify,
 	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
+	.io_mem_free = &radeon_ttm_io_mem_free,
 };
 
 int radeon_ttm_init(struct radeon_device *rdev)
@@ -571,13 +607,17 @@
 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo;
+	struct radeon_device *rdev;
 	int r;
 
-	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	bo = (struct ttm_buffer_object *)vma->vm_private_data;	
 	if (bo == NULL) {
 		return VM_FAULT_NOPAGE;
 	}
+	rdev = radeon_get_rdev(bo->bdev);
+	mutex_lock(&rdev->vram_mutex);
 	r = ttm_vm_ops->fault(vma, vmf);
+	mutex_unlock(&rdev->vram_mutex);
 	return r;
 }
 
@@ -745,8 +785,8 @@
 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
-	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
+	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
+	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
 	unsigned i;
 
 	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -763,7 +803,13 @@
 			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
 
 	}
-	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES);
+	/* Add ttm page pool to debugfs */
+	sprintf(radeon_mem_types_names[i], "ttm_page_pool");
+	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+	radeon_mem_types_list[i].driver_features = 0;
+	radeon_mem_types_list[i].data = NULL;
+	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
 
 #endif
 	return 0;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 1a41cb2..9e4240b 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -243,8 +243,6 @@
 
 void rs400_gpu_init(struct radeon_device *rdev)
 {
-	/* FIXME: HDP same place on rs400 ? */
-	r100_hdp_reset(rdev);
 	/* FIXME: is this correct ? */
 	r420_pipes_init(rdev);
 	if (rs400_mc_wait_for_idle(rdev)) {
@@ -433,7 +431,7 @@
 	/* setup MC before calling post tables */
 	rs400_mc_program(rdev);
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
 			RREG32(R_0007C0_CP_STAT));
@@ -458,7 +456,6 @@
 
 void rs400_fini(struct radeon_device *rdev)
 {
-	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -497,7 +494,7 @@
 			return r;
 	}
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev,
 			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
@@ -509,8 +506,6 @@
 
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
-	/* Initialize power management */
-	radeon_pm_init(rdev);
 	/* initialize memory controller */
 	rs400_mc_init(rdev);
 	/* Fence driver */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index a81bc7a..79887ca 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,135 @@
 void rs600_gpu_init(struct radeon_device *rdev);
 int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
+void rs600_pm_misc(struct radeon_device *rdev)
+{
+	int requested_index = rdev->pm.requested_power_state_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
+	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;
+
+	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp |= voltage->gpio.mask;
+			else
+				tmp &= ~(voltage->gpio.mask);
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		} else {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp &= ~voltage->gpio.mask;
+			else
+				tmp |= voltage->gpio.mask;
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		}
+	}
+
+	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
+	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
+	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
+		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
+		}
+	} else {
+		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
+		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
+	}
+	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);
+
+	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
+		if (voltage->delay) {
+			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
+			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
+		} else
+			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
+	} else
+		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
+	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);
+
+	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+		hdp_dyn_cntl &= ~HDP_FORCEON;
+	else
+		hdp_dyn_cntl |= HDP_FORCEON;
+	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
+#if 0
+	/* mc_host_dyn seems to cause hangs from time to time */
+	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
+		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
+	else
+		mc_host_dyn_cntl |= MC_HOST_FORCEON;
+	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
+#endif
+	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
+		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
+	else
+		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
+	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);
+
+	/* set pcie lanes */
+	if ((rdev->flags & RADEON_IS_PCIE) &&
+	    !(rdev->flags & RADEON_IS_IGP) &&
+	    rdev->asic->set_pcie_lanes &&
+	    (ps->pcie_lanes !=
+	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+		radeon_set_pcie_lanes(rdev,
+				      ps->pcie_lanes);
+		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+	}
+}
+
+void rs600_pm_prepare(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* disable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+void rs600_pm_finish(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* enable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
 /* hpd for digital panel detect/disconnect */
 bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
 {
@@ -147,6 +276,78 @@
 	}
 }
 
+void rs600_bm_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* disable bus mastering */
+	pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+	mdelay(1);
+}
+
+int rs600_asic_reset(struct radeon_device *rdev)
+{
+	u32 status, tmp;
+	struct rv515_mc_save save;
+
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(status)) {
+		return 0;
+	}
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* stop CP */
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	WREG32(RADEON_CP_RB_WPTR, 0);
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	pci_save_state(rdev->pdev);
+	/* disable bus mastering */
+	rs600_bm_disable(rdev);
+	/* reset GA+VAP */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+					S_0000F0_SOFT_RESET_GA(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* reset CP */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* reset MC */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* restore PCI & busmastering */
+	pci_restore_state(rdev->pdev);
+	/* Check if GPU is idle */
+	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+		dev_err(rdev->dev, "failed to reset GPU\n");
+		rdev->gpu_lockup = true;
+		return -1;
+	}
+	rv515_mc_resume(rdev, &save);
+	dev_info(rdev->dev, "GPU reset succeeded\n");
+	return 0;
+}
+
 /*
  * GART.
  */
@@ -310,6 +511,9 @@
 	if (rdev->irq.sw_int) {
 		tmp |= S_000040_SW_INT_EN(1);
 	}
+	if (rdev->irq.gui_idle) {
+		tmp |= S_000040_GUI_IDLE(1);
+	}
 	if (rdev->irq.crtc_vblank_int[0]) {
 		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
 	}
@@ -332,9 +536,15 @@
 static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
 {
 	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
-	uint32_t irq_mask = ~C_000044_SW_INT;
+	uint32_t irq_mask = S_000044_SW_INT(1);
 	u32 tmp;
 
+	/* the interrupt works, but the status bit is permanently asserted */
+	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
+		if (!rdev->irq.gui_idle_acked)
+			irq_mask |= S_000044_GUI_IDLE_STAT(1);
+	}
+
 	if (G_000044_DISPLAY_INT_STAT(irqs)) {
 		*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
 		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
@@ -382,6 +592,9 @@
 	uint32_t r500_disp_int;
 	bool queue_hotplug = false;
 
+	/* reset gui idle ack.  the status bit is broken */
+	rdev->irq.gui_idle_acked = false;
+
 	status = rs600_irq_ack(rdev, &r500_disp_int);
 	if (!status && !r500_disp_int) {
 		return IRQ_NONE;
@@ -390,6 +603,12 @@
 		/* SW interrupt */
 		if (G_000044_SW_INT(status))
 			radeon_fence_process(rdev);
+		/* GUI idle */
+		if (G_000040_GUI_IDLE(status)) {
+			rdev->irq.gui_idle_acked = true;
+			rdev->pm.gui_idle = true;
+			wake_up(&rdev->irq.idle_queue);
+		}
 		/* Vertical blank interrupts */
 		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
 			drm_handle_vblank(rdev->ddev, 0);
@@ -411,6 +630,8 @@
 		}
 		status = rs600_irq_ack(rdev, &r500_disp_int);
 	}
+	/* reset gui idle ack.  the status bit is broken */
+	rdev->irq.gui_idle_acked = false;
 	if (queue_hotplug)
 		queue_work(rdev->wq, &rdev->hotplug_work);
 	if (rdev->msi_enabled) {
@@ -454,7 +675,6 @@
 
 void rs600_gpu_init(struct radeon_device *rdev)
 {
-	r100_hdp_reset(rdev);
 	r420_pipes_init(rdev);
 	/* Wait for mc idle */
 	if (rs600_mc_wait_for_idle(rdev))
@@ -601,7 +821,7 @@
 	/* Resume clock before doing reset */
 	rv515_clock_startup(rdev);
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
 			RREG32(R_0007C0_CP_STAT));
@@ -626,7 +846,6 @@
 
 void rs600_fini(struct radeon_device *rdev)
 {
-	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -664,7 +883,7 @@
 		return -EINVAL;
 	}
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev,
 			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
@@ -676,8 +895,6 @@
 
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
-	/* Initialize power management */
-	radeon_pm_init(rdev);
 	/* initialize memory controller */
 	rs600_mc_init(rdev);
 	rs600_debugfs(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index e52d269..a27c13a 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -178,6 +178,52 @@
 #define   S_000074_MC_IND_DATA(x)                      (((x) & 0xFFFFFFFF) << 0)
 #define   G_000074_MC_IND_DATA(x)                      (((x) >> 0) & 0xFFFFFFFF)
 #define   C_000074_MC_IND_DATA                         0x00000000
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                   (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                   (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                      0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                    (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                    (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                       0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                  (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                  (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                     0xFFFFBFFF
 #define R_000134_HDP_FB_LOCATION                     0x000134
 #define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
 #define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
@@ -588,4 +634,38 @@
 #define   G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
 #define   C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
 
+/* PLL regs */
+#define GENERAL_PWRMGT                                 0x8
+#define   GLOBAL_PWRMGT_EN                             (1 << 0)
+#define   MOBILE_SU                                    (1 << 2)
+#define DYN_PWRMGT_SCLK_LENGTH                         0xc
+#define   NORMAL_POWER_SCLK_HILEN(x)                   ((x) << 0)
+#define   NORMAL_POWER_SCLK_LOLEN(x)                   ((x) << 4)
+#define   REDUCED_POWER_SCLK_HILEN(x)                  ((x) << 8)
+#define   REDUCED_POWER_SCLK_LOLEN(x)                  ((x) << 12)
+#define   POWER_D1_SCLK_HILEN(x)                       ((x) << 16)
+#define   POWER_D1_SCLK_LOLEN(x)                       ((x) << 20)
+#define   STATIC_SCREEN_HILEN(x)                       ((x) << 24)
+#define   STATIC_SCREEN_LOLEN(x)                       ((x) << 28)
+#define DYN_SCLK_VOL_CNTL                              0xe
+#define   IO_CG_VOLTAGE_DROP                           (1 << 0)
+#define   VOLTAGE_DROP_SYNC                            (1 << 2)
+#define   VOLTAGE_DELAY_SEL(x)                         ((x) << 3)
+#define HDP_DYN_CNTL                                   0x10
+#define   HDP_FORCEON                                  (1 << 0)
+#define MC_HOST_DYN_CNTL                               0x1e
+#define   MC_HOST_FORCEON                              (1 << 0)
+#define DYN_BACKBIAS_CNTL                              0x29
+#define   IO_CG_BACKBIAS_EN                            (1 << 0)
+
+/* mmreg */
+#define DOUT_POWER_MANAGEMENT_CNTL                     0x7ee0
+#define   PWRDN_WAIT_BUSY_OFF                          (1 << 0)
+#define   PWRDN_WAIT_PWRSEQ_OFF                        (1 << 4)
+#define   PWRDN_WAIT_PPLL_OFF                          (1 << 8)
+#define   PWRUP_WAIT_PPLL_ON                           (1 << 12)
+#define   PWRUP_WAIT_MEM_INIT_DONE                     (1 << 16)
+#define   PM_ASSERT_RESET                              (1 << 20)
+#define   PM_PWRDN_PPLL                                (1 << 24)
+
 #endif
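
(Not part of the patch: a minimal sketch of how the S_/G_/C_ field helpers added to rs600d.h above are normally combined with the driver's MMIO accessors. The read-modify-write shown here is illustrative only and does not reproduce the actual reset sequence introduced elsewhere in this series.)

	/* C_* masks the field off, S_* builds the field value, G_* extracts it.
	 * RREG32()/WREG32() are the usual radeon register accessors. */
	u32 tmp = RREG32(R_0000F0_RBBM_SOFT_RESET);
	tmp &= C_0000F0_SOFT_RESET_CP;		/* clear SOFT_RESET_CP */
	tmp |= S_0000F0_SOFT_RESET_CP(1);	/* assert a CP soft reset */
	WREG32(R_0000F0_RBBM_SOFT_RESET, tmp);
	if (G_0000F0_SOFT_RESET_CP(RREG32(R_0000F0_RBBM_SOFT_RESET)))
		; /* CP soft-reset bit still asserted */
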
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index bbf3da7..bcc3319 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -48,8 +48,6 @@
 
 static void rs690_gpu_init(struct radeon_device *rdev)
 {
-	/* FIXME: HDP same place on rs690 ? */
-	r100_hdp_reset(rdev);
 	/* FIXME: is this correct ? */
 	r420_pipes_init(rdev);
 	if (rs690_mc_wait_for_idle(rdev)) {
@@ -78,59 +76,59 @@
 		/* Get various system informations from bios */
 		switch (crev) {
 		case 1:
-			tmp.full = rfixed_const(100);
-			rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock);
-			rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-			rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
-			rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock));
-			rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth);
+			tmp.full = dfixed_const(100);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
+			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+			rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
+			rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
 			break;
 		case 2:
-			tmp.full = rfixed_const(100);
-			rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock);
-			rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-			rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock);
-			rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
-			rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq);
-			rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
-			rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
+			tmp.full = dfixed_const(100);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
+			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+			rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
+			rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
+			rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
+			rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
 			break;
 		default:
-			tmp.full = rfixed_const(100);
+			tmp.full = dfixed_const(100);
 			/* We assume the slower possible clock ie worst case */
 			/* DDR 333Mhz */
-			rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
 			/* FIXME: system clock ? */
-			rdev->pm.igp_system_mclk.full = rfixed_const(100);
-			rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
-			rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
-			rdev->pm.igp_ht_link_width.full = rfixed_const(8);
+			rdev->pm.igp_system_mclk.full = dfixed_const(100);
+			rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
+			rdev->pm.igp_ht_link_width.full = dfixed_const(8);
 			DRM_ERROR("No integrated system info for your GPU, using safe default\n");
 			break;
 		}
 	} else {
-		tmp.full = rfixed_const(100);
+		tmp.full = dfixed_const(100);
 		/* We assume the slower possible clock ie worst case */
 		/* DDR 333Mhz */
-		rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
+		rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
 		/* FIXME: system clock ? */
-		rdev->pm.igp_system_mclk.full = rfixed_const(100);
-		rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
-		rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
-		rdev->pm.igp_ht_link_width.full = rfixed_const(8);
+		rdev->pm.igp_system_mclk.full = dfixed_const(100);
+		rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+		rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
+		rdev->pm.igp_ht_link_width.full = dfixed_const(8);
 		DRM_ERROR("No integrated system info for your GPU, using safe default\n");
 	}
 	/* Compute various bandwidth */
 	/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4  */
-	tmp.full = rfixed_const(4);
-	rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp);
+	tmp.full = dfixed_const(4);
+	rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
 	/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
 	 *              = ht_clk * ht_width / 5
 	 */
-	tmp.full = rfixed_const(5);
-	rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk,
+	tmp.full = dfixed_const(5);
+	rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
 						rdev->pm.igp_ht_link_width);
-	rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp);
+	rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
 	if (tmp.full < rdev->pm.max_bandwidth.full) {
 		/* HT link is a limiting factor */
 		rdev->pm.max_bandwidth.full = tmp.full;
@@ -138,10 +136,10 @@
 	/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
 	 *                    = (sideport_clk * 14) / 10
 	 */
-	tmp.full = rfixed_const(14);
-	rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
-	tmp.full = rfixed_const(10);
-	rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
+	tmp.full = dfixed_const(14);
+	rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
+	tmp.full = dfixed_const(10);
+	rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
 }
 
 void rs690_mc_init(struct radeon_device *rdev)
@@ -241,20 +239,20 @@
 		return;
 	}
 
-	if (crtc->vsc.full > rfixed_const(2))
-		wm->num_line_pair.full = rfixed_const(2);
+	if (crtc->vsc.full > dfixed_const(2))
+		wm->num_line_pair.full = dfixed_const(2);
 	else
-		wm->num_line_pair.full = rfixed_const(1);
+		wm->num_line_pair.full = dfixed_const(1);
 
-	b.full = rfixed_const(mode->crtc_hdisplay);
-	c.full = rfixed_const(256);
-	a.full = rfixed_div(b, c);
-	request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
-	request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
-	if (a.full < rfixed_const(4)) {
+	b.full = dfixed_const(mode->crtc_hdisplay);
+	c.full = dfixed_const(256);
+	a.full = dfixed_div(b, c);
+	request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+	if (a.full < dfixed_const(4)) {
 		wm->lb_request_fifo_depth = 4;
 	} else {
-		wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
+		wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
 	}
 
 	/* Determine consumption rate
@@ -263,23 +261,23 @@
 	 *  vsc = vertical scaling ratio, defined as source/destination
 	 *  hsc = horizontal scaling ration, defined as source/destination
 	 */
-	a.full = rfixed_const(mode->clock);
-	b.full = rfixed_const(1000);
-	a.full = rfixed_div(a, b);
-	pclk.full = rfixed_div(b, a);
+	a.full = dfixed_const(mode->clock);
+	b.full = dfixed_const(1000);
+	a.full = dfixed_div(a, b);
+	pclk.full = dfixed_div(b, a);
 	if (crtc->rmx_type != RMX_OFF) {
-		b.full = rfixed_const(2);
+		b.full = dfixed_const(2);
 		if (crtc->vsc.full > b.full)
 			b.full = crtc->vsc.full;
-		b.full = rfixed_mul(b, crtc->hsc);
-		c.full = rfixed_const(2);
-		b.full = rfixed_div(b, c);
-		consumption_time.full = rfixed_div(pclk, b);
+		b.full = dfixed_mul(b, crtc->hsc);
+		c.full = dfixed_const(2);
+		b.full = dfixed_div(b, c);
+		consumption_time.full = dfixed_div(pclk, b);
 	} else {
 		consumption_time.full = pclk.full;
 	}
-	a.full = rfixed_const(1);
-	wm->consumption_rate.full = rfixed_div(a, consumption_time);
+	a.full = dfixed_const(1);
+	wm->consumption_rate.full = dfixed_div(a, consumption_time);
 
 
 	/* Determine line time
@@ -287,18 +285,18 @@
 	 *  LineTime = total number of horizontal pixels
 	 *  pclk = pixel clock period(ns)
 	 */
-	a.full = rfixed_const(crtc->base.mode.crtc_htotal);
-	line_time.full = rfixed_mul(a, pclk);
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	line_time.full = dfixed_mul(a, pclk);
 
 	/* Determine active time
 	 *  ActiveTime = time of active region of display within one line,
 	 *  hactive = total number of horizontal active pixels
 	 *  htotal = total number of horizontal pixels
 	 */
-	a.full = rfixed_const(crtc->base.mode.crtc_htotal);
-	b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
-	wm->active_time.full = rfixed_mul(line_time, b);
-	wm->active_time.full = rfixed_div(wm->active_time, a);
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->active_time.full = dfixed_mul(line_time, b);
+	wm->active_time.full = dfixed_div(wm->active_time, a);
 
 	/* Maximun bandwidth is the minimun bandwidth of all component */
 	rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
@@ -306,8 +304,8 @@
 		if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
 			rdev->pm.sideport_bandwidth.full)
 			rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
-		read_delay_latency.full = rfixed_const(370 * 800 * 1000);
-		read_delay_latency.full = rfixed_div(read_delay_latency,
+		read_delay_latency.full = dfixed_const(370 * 800 * 1000);
+		read_delay_latency.full = dfixed_div(read_delay_latency,
 			rdev->pm.igp_sideport_mclk);
 	} else {
 		if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
@@ -316,23 +314,23 @@
 		if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
 			rdev->pm.ht_bandwidth.full)
 			rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
-		read_delay_latency.full = rfixed_const(5000);
+		read_delay_latency.full = dfixed_const(5000);
 	}
 
 	/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
-	a.full = rfixed_const(16);
-	rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a);
-	a.full = rfixed_const(1000);
-	rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk);
+	a.full = dfixed_const(16);
+	rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a);
+	a.full = dfixed_const(1000);
+	rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk);
 	/* Determine chunk time
 	 * ChunkTime = the time it takes the DCP to send one chunk of data
 	 * to the LB which consists of pipeline delay and inter chunk gap
 	 * sclk = system clock(ns)
 	 */
-	a.full = rfixed_const(256 * 13);
-	chunk_time.full = rfixed_mul(rdev->pm.sclk, a);
-	a.full = rfixed_const(10);
-	chunk_time.full = rfixed_div(chunk_time, a);
+	a.full = dfixed_const(256 * 13);
+	chunk_time.full = dfixed_mul(rdev->pm.sclk, a);
+	a.full = dfixed_const(10);
+	chunk_time.full = dfixed_div(chunk_time, a);
 
 	/* Determine the worst case latency
 	 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
@@ -342,13 +340,13 @@
 	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
 	 *             which consists of pipeline delay and inter chunk gap
 	 */
-	if (rfixed_trunc(wm->num_line_pair) > 1) {
-		a.full = rfixed_const(3);
-		wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+	if (dfixed_trunc(wm->num_line_pair) > 1) {
+		a.full = dfixed_const(3);
+		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
 		wm->worst_case_latency.full += read_delay_latency.full;
 	} else {
-		a.full = rfixed_const(2);
-		wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+		a.full = dfixed_const(2);
+		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
 		wm->worst_case_latency.full += read_delay_latency.full;
 	}
 
@@ -362,34 +360,34 @@
 	 *              of data to the LB which consists of
 	 *  pipeline delay and inter chunk gap
 	 */
-	if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
+	if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
 		tolerable_latency.full = line_time.full;
 	} else {
-		tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
+		tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
 		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
-		tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
+		tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
 		tolerable_latency.full = line_time.full - tolerable_latency.full;
 	}
 	/* We assume worst case 32bits (4 bytes) */
-	wm->dbpp.full = rfixed_const(4 * 8);
+	wm->dbpp.full = dfixed_const(4 * 8);
 
 	/* Determine the maximum priority mark
 	 *  width = viewport width in pixels
 	 */
-	a.full = rfixed_const(16);
-	wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
-	wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
-	wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+	a.full = dfixed_const(16);
+	wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+	wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
 
 	/* Determine estimated width */
 	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
-	estimated_width.full = rfixed_div(estimated_width, consumption_time);
-	if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
-		wm->priority_mark.full = rfixed_const(10);
+	estimated_width.full = dfixed_div(estimated_width, consumption_time);
+	if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+		wm->priority_mark.full = dfixed_const(10);
 	} else {
-		a.full = rfixed_const(16);
-		wm->priority_mark.full = rfixed_div(estimated_width, a);
-		wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+		a.full = dfixed_const(16);
+		wm->priority_mark.full = dfixed_div(estimated_width, a);
+		wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
 		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
 	}
 }
@@ -441,58 +439,58 @@
 	WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
 
 	if (mode0 && mode1) {
-		if (rfixed_trunc(wm0.dbpp) > 64)
-			a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
+		if (dfixed_trunc(wm0.dbpp) > 64)
+			a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
 		else
 			a.full = wm0.num_line_pair.full;
-		if (rfixed_trunc(wm1.dbpp) > 64)
-			b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
+		if (dfixed_trunc(wm1.dbpp) > 64)
+			b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
 		else
 			b.full = wm1.num_line_pair.full;
 		a.full += b.full;
-		fill_rate.full = rfixed_div(wm0.sclk, a);
+		fill_rate.full = dfixed_div(wm0.sclk, a);
 		if (wm0.consumption_rate.full > fill_rate.full) {
 			b.full = wm0.consumption_rate.full - fill_rate.full;
-			b.full = rfixed_mul(b, wm0.active_time);
-			a.full = rfixed_mul(wm0.worst_case_latency,
+			b.full = dfixed_mul(b, wm0.active_time);
+			a.full = dfixed_mul(wm0.worst_case_latency,
 						wm0.consumption_rate);
 			a.full = a.full + b.full;
-			b.full = rfixed_const(16 * 1000);
-			priority_mark02.full = rfixed_div(a, b);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
 		} else {
-			a.full = rfixed_mul(wm0.worst_case_latency,
+			a.full = dfixed_mul(wm0.worst_case_latency,
 						wm0.consumption_rate);
-			b.full = rfixed_const(16 * 1000);
-			priority_mark02.full = rfixed_div(a, b);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
 		}
 		if (wm1.consumption_rate.full > fill_rate.full) {
 			b.full = wm1.consumption_rate.full - fill_rate.full;
-			b.full = rfixed_mul(b, wm1.active_time);
-			a.full = rfixed_mul(wm1.worst_case_latency,
+			b.full = dfixed_mul(b, wm1.active_time);
+			a.full = dfixed_mul(wm1.worst_case_latency,
 						wm1.consumption_rate);
 			a.full = a.full + b.full;
-			b.full = rfixed_const(16 * 1000);
-			priority_mark12.full = rfixed_div(a, b);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
 		} else {
-			a.full = rfixed_mul(wm1.worst_case_latency,
+			a.full = dfixed_mul(wm1.worst_case_latency,
 						wm1.consumption_rate);
-			b.full = rfixed_const(16 * 1000);
-			priority_mark12.full = rfixed_div(a, b);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
 		}
 		if (wm0.priority_mark.full > priority_mark02.full)
 			priority_mark02.full = wm0.priority_mark.full;
-		if (rfixed_trunc(priority_mark02) < 0)
+		if (dfixed_trunc(priority_mark02) < 0)
 			priority_mark02.full = 0;
 		if (wm0.priority_mark_max.full > priority_mark02.full)
 			priority_mark02.full = wm0.priority_mark_max.full;
 		if (wm1.priority_mark.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark.full;
-		if (rfixed_trunc(priority_mark12) < 0)
+		if (dfixed_trunc(priority_mark12) < 0)
 			priority_mark12.full = 0;
 		if (wm1.priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark_max.full;
-		d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
-		d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
 		if (rdev->disp_priority == 2) {
 			d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
 			d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
@@ -502,32 +500,32 @@
 		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
 		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
 	} else if (mode0) {
-		if (rfixed_trunc(wm0.dbpp) > 64)
-			a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
+		if (dfixed_trunc(wm0.dbpp) > 64)
+			a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
 		else
 			a.full = wm0.num_line_pair.full;
-		fill_rate.full = rfixed_div(wm0.sclk, a);
+		fill_rate.full = dfixed_div(wm0.sclk, a);
 		if (wm0.consumption_rate.full > fill_rate.full) {
 			b.full = wm0.consumption_rate.full - fill_rate.full;
-			b.full = rfixed_mul(b, wm0.active_time);
-			a.full = rfixed_mul(wm0.worst_case_latency,
+			b.full = dfixed_mul(b, wm0.active_time);
+			a.full = dfixed_mul(wm0.worst_case_latency,
 						wm0.consumption_rate);
 			a.full = a.full + b.full;
-			b.full = rfixed_const(16 * 1000);
-			priority_mark02.full = rfixed_div(a, b);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
 		} else {
-			a.full = rfixed_mul(wm0.worst_case_latency,
+			a.full = dfixed_mul(wm0.worst_case_latency,
 						wm0.consumption_rate);
-			b.full = rfixed_const(16 * 1000);
-			priority_mark02.full = rfixed_div(a, b);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
 		}
 		if (wm0.priority_mark.full > priority_mark02.full)
 			priority_mark02.full = wm0.priority_mark.full;
-		if (rfixed_trunc(priority_mark02) < 0)
+		if (dfixed_trunc(priority_mark02) < 0)
 			priority_mark02.full = 0;
 		if (wm0.priority_mark_max.full > priority_mark02.full)
 			priority_mark02.full = wm0.priority_mark_max.full;
-		d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
 		if (rdev->disp_priority == 2)
 			d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
 		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
@@ -537,32 +535,32 @@
 		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
 			S_006D4C_D2MODE_PRIORITY_B_OFF(1));
 	} else {
-		if (rfixed_trunc(wm1.dbpp) > 64)
-			a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
+		if (dfixed_trunc(wm1.dbpp) > 64)
+			a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
 		else
 			a.full = wm1.num_line_pair.full;
-		fill_rate.full = rfixed_div(wm1.sclk, a);
+		fill_rate.full = dfixed_div(wm1.sclk, a);
 		if (wm1.consumption_rate.full > fill_rate.full) {
 			b.full = wm1.consumption_rate.full - fill_rate.full;
-			b.full = rfixed_mul(b, wm1.active_time);
-			a.full = rfixed_mul(wm1.worst_case_latency,
+			b.full = dfixed_mul(b, wm1.active_time);
+			a.full = dfixed_mul(wm1.worst_case_latency,
 						wm1.consumption_rate);
 			a.full = a.full + b.full;
-			b.full = rfixed_const(16 * 1000);
-			priority_mark12.full = rfixed_div(a, b);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
 		} else {
-			a.full = rfixed_mul(wm1.worst_case_latency,
+			a.full = dfixed_mul(wm1.worst_case_latency,
 						wm1.consumption_rate);
-			b.full = rfixed_const(16 * 1000);
-			priority_mark12.full = rfixed_div(a, b);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
 		}
 		if (wm1.priority_mark.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark.full;
-		if (rfixed_trunc(priority_mark12) < 0)
+		if (dfixed_trunc(priority_mark12) < 0)
 			priority_mark12.full = 0;
 		if (wm1.priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark_max.full;
-		d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
 		if (rdev->disp_priority == 2)
 			d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
 		WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
@@ -653,7 +651,7 @@
 	/* Resume clock before doing reset */
 	rv515_clock_startup(rdev);
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
 			RREG32(R_0007C0_CP_STAT));
@@ -678,7 +676,6 @@
 
 void rs690_fini(struct radeon_device *rdev)
 {
-	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -717,7 +714,7 @@
 		return -EINVAL;
 	}
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev,
 			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
@@ -729,8 +726,6 @@
 
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
-	/* Initialize power management */
-	radeon_pm_init(rdev);
 	/* initialize memory controller */
 	rs690_mc_init(rdev);
 	rv515_debugfs(rdev);
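
(Not part of the patch: the rfixed_* to dfixed_* rename above moves the bandwidth math onto the shared DRM 20.12 fixed-point helpers. A minimal sketch of the calling convention follows, assuming the fixed20_12 union with a raw .full member; only helpers that already appear in the hunks above are used.)

	fixed20_12 clk, div, bw;

	clk.full = dfixed_const(333);		/* 333.0 in 20.12 fixed point */
	div.full = dfixed_const(10);		/* 10.0 */
	bw.full = dfixed_mul(clk, div);		/* 3330.0 */
	bw.full = dfixed_div(bw, div);		/* back to 333.0 */
	/* dfixed_trunc() returns the integer part as a plain int;
	 * dfixed_ceil() rounds up and yields a value for .full. */
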
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 9035121..7d9a7b0 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -147,16 +147,11 @@
 {
 	unsigned pipe_select_current, gb_pipe_select, tmp;
 
-	r100_hdp_reset(rdev);
-	r100_rb2d_reset(rdev);
-
 	if (r100_gui_wait_for_idle(rdev)) {
 		printk(KERN_WARNING "Failed to wait GUI idle while "
 		       "reseting GPU. Bad things might happen.\n");
 	}
-
 	rv515_vga_render_disable(rdev);
-
 	r420_pipes_init(rdev);
 	gb_pipe_select = RREG32(0x402C);
 	tmp = RREG32(0x170C);
@@ -174,91 +169,6 @@
 	}
 }
 
-int rv515_ga_reset(struct radeon_device *rdev)
-{
-	uint32_t tmp;
-	bool reinit_cp;
-	int i;
-
-	reinit_cp = rdev->cp.ready;
-	rdev->cp.ready = false;
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		WREG32(CP_CSQ_MODE, 0);
-		WREG32(CP_CSQ_CNTL, 0);
-		WREG32(RBBM_SOFT_RESET, 0x32005);
-		(void)RREG32(RBBM_SOFT_RESET);
-		udelay(200);
-		WREG32(RBBM_SOFT_RESET, 0);
-		/* Wait to prevent race in RBBM_STATUS */
-		mdelay(1);
-		tmp = RREG32(RBBM_STATUS);
-		if (tmp & ((1 << 20) | (1 << 26))) {
-			DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
-			/* GA still busy soft reset it */
-			WREG32(0x429C, 0x200);
-			WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
-			WREG32(0x43E0, 0);
-			WREG32(0x43E4, 0);
-			WREG32(0x24AC, 0);
-		}
-		/* Wait to prevent race in RBBM_STATUS */
-		mdelay(1);
-		tmp = RREG32(RBBM_STATUS);
-		if (!(tmp & ((1 << 20) | (1 << 26)))) {
-			break;
-		}
-	}
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(RBBM_STATUS);
-		if (!(tmp & ((1 << 20) | (1 << 26)))) {
-			DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
-				 tmp);
-			DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
-			DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
-			DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
-			if (reinit_cp) {
-				return r100_cp_init(rdev, rdev->cp.ring_size);
-			}
-			return 0;
-		}
-		DRM_UDELAY(1);
-	}
-	tmp = RREG32(RBBM_STATUS);
-	DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
-	return -1;
-}
-
-int rv515_gpu_reset(struct radeon_device *rdev)
-{
-	uint32_t status;
-
-	/* reset order likely matter */
-	status = RREG32(RBBM_STATUS);
-	/* reset HDP */
-	r100_hdp_reset(rdev);
-	/* reset rb2d */
-	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
-		r100_rb2d_reset(rdev);
-	}
-	/* reset GA */
-	if (status & ((1 << 20) | (1 << 26))) {
-		rv515_ga_reset(rdev);
-	}
-	/* reset CP */
-	status = RREG32(RBBM_STATUS);
-	if (status & (1 << 16)) {
-		r100_cp_reset(rdev);
-	}
-	/* Check if GPU is idle */
-	status = RREG32(RBBM_STATUS);
-	if (status & (1 << 31)) {
-		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
-		return -1;
-	}
-	DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
-	return 0;
-}
-
 static void rv515_vram_get_type(struct radeon_device *rdev)
 {
 	uint32_t tmp;
@@ -335,7 +245,7 @@
 
 	tmp = RREG32(0x2140);
 	seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
-	radeon_gpu_reset(rdev);
+	radeon_asic_reset(rdev);
 	tmp = RREG32(0x425C);
 	seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
 	return 0;
@@ -503,7 +413,7 @@
 	/* Resume clock before doing reset */
 	rv515_clock_startup(rdev);
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
 			RREG32(R_0007C0_CP_STAT));
@@ -535,7 +445,6 @@
 
 void rv515_fini(struct radeon_device *rdev)
 {
-	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -573,7 +482,7 @@
 		return -EINVAL;
 	}
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev,
 			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
@@ -584,8 +493,6 @@
 		return -EINVAL;
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
-	/* Initialize power management */
-	radeon_pm_init(rdev);
 	/* initialize AGP */
 	if (rdev->flags & RADEON_IS_AGP) {
 		r = radeon_agp_init(rdev);
@@ -885,20 +792,20 @@
 		return;
 	}
 
-	if (crtc->vsc.full > rfixed_const(2))
-		wm->num_line_pair.full = rfixed_const(2);
+	if (crtc->vsc.full > dfixed_const(2))
+		wm->num_line_pair.full = dfixed_const(2);
 	else
-		wm->num_line_pair.full = rfixed_const(1);
+		wm->num_line_pair.full = dfixed_const(1);
 
-	b.full = rfixed_const(mode->crtc_hdisplay);
-	c.full = rfixed_const(256);
-	a.full = rfixed_div(b, c);
-	request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
-	request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
-	if (a.full < rfixed_const(4)) {
+	b.full = dfixed_const(mode->crtc_hdisplay);
+	c.full = dfixed_const(256);
+	a.full = dfixed_div(b, c);
+	request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+	if (a.full < dfixed_const(4)) {
 		wm->lb_request_fifo_depth = 4;
 	} else {
-		wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
+		wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
 	}
 
 	/* Determine consumption rate
@@ -907,23 +814,23 @@
 	 *  vsc = vertical scaling ratio, defined as source/destination
 	 *  hsc = horizontal scaling ration, defined as source/destination
 	 */
-	a.full = rfixed_const(mode->clock);
-	b.full = rfixed_const(1000);
-	a.full = rfixed_div(a, b);
-	pclk.full = rfixed_div(b, a);
+	a.full = dfixed_const(mode->clock);
+	b.full = dfixed_const(1000);
+	a.full = dfixed_div(a, b);
+	pclk.full = dfixed_div(b, a);
 	if (crtc->rmx_type != RMX_OFF) {
-		b.full = rfixed_const(2);
+		b.full = dfixed_const(2);
 		if (crtc->vsc.full > b.full)
 			b.full = crtc->vsc.full;
-		b.full = rfixed_mul(b, crtc->hsc);
-		c.full = rfixed_const(2);
-		b.full = rfixed_div(b, c);
-		consumption_time.full = rfixed_div(pclk, b);
+		b.full = dfixed_mul(b, crtc->hsc);
+		c.full = dfixed_const(2);
+		b.full = dfixed_div(b, c);
+		consumption_time.full = dfixed_div(pclk, b);
 	} else {
 		consumption_time.full = pclk.full;
 	}
-	a.full = rfixed_const(1);
-	wm->consumption_rate.full = rfixed_div(a, consumption_time);
+	a.full = dfixed_const(1);
+	wm->consumption_rate.full = dfixed_div(a, consumption_time);
 
 
 	/* Determine line time
@@ -931,27 +838,27 @@
 	 *  LineTime = total number of horizontal pixels
 	 *  pclk = pixel clock period(ns)
 	 */
-	a.full = rfixed_const(crtc->base.mode.crtc_htotal);
-	line_time.full = rfixed_mul(a, pclk);
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	line_time.full = dfixed_mul(a, pclk);
 
 	/* Determine active time
 	 *  ActiveTime = time of active region of display within one line,
 	 *  hactive = total number of horizontal active pixels
 	 *  htotal = total number of horizontal pixels
 	 */
-	a.full = rfixed_const(crtc->base.mode.crtc_htotal);
-	b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
-	wm->active_time.full = rfixed_mul(line_time, b);
-	wm->active_time.full = rfixed_div(wm->active_time, a);
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->active_time.full = dfixed_mul(line_time, b);
+	wm->active_time.full = dfixed_div(wm->active_time, a);
 
 	/* Determine chunk time
 	 * ChunkTime = the time it takes the DCP to send one chunk of data
 	 * to the LB which consists of pipeline delay and inter chunk gap
 	 * sclk = system clock(Mhz)
 	 */
-	a.full = rfixed_const(600 * 1000);
-	chunk_time.full = rfixed_div(a, rdev->pm.sclk);
-	read_delay_latency.full = rfixed_const(1000);
+	a.full = dfixed_const(600 * 1000);
+	chunk_time.full = dfixed_div(a, rdev->pm.sclk);
+	read_delay_latency.full = dfixed_const(1000);
 
 	/* Determine the worst case latency
 	 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
@@ -961,9 +868,9 @@
 	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
 	 *             which consists of pipeline delay and inter chunk gap
 	 */
-	if (rfixed_trunc(wm->num_line_pair) > 1) {
-		a.full = rfixed_const(3);
-		wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+	if (dfixed_trunc(wm->num_line_pair) > 1) {
+		a.full = dfixed_const(3);
+		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
 		wm->worst_case_latency.full += read_delay_latency.full;
 	} else {
 		wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
@@ -979,34 +886,34 @@
 	 *              of data to the LB which consists of
 	 *  pipeline delay and inter chunk gap
 	 */
-	if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
+	if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
 		tolerable_latency.full = line_time.full;
 	} else {
-		tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
+		tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
 		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
-		tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
+		tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
 		tolerable_latency.full = line_time.full - tolerable_latency.full;
 	}
 	/* We assume worst case 32bits (4 bytes) */
-	wm->dbpp.full = rfixed_const(2 * 16);
+	wm->dbpp.full = dfixed_const(2 * 16);
 
 	/* Determine the maximum priority mark
 	 *  width = viewport width in pixels
 	 */
-	a.full = rfixed_const(16);
-	wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
-	wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
-	wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+	a.full = dfixed_const(16);
+	wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+	wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
 
 	/* Determine estimated width */
 	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
-	estimated_width.full = rfixed_div(estimated_width, consumption_time);
-	if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+	estimated_width.full = dfixed_div(estimated_width, consumption_time);
+	if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
 		wm->priority_mark.full = wm->priority_mark_max.full;
 	} else {
-		a.full = rfixed_const(16);
-		wm->priority_mark.full = rfixed_div(estimated_width, a);
-		wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+		a.full = dfixed_const(16);
+		wm->priority_mark.full = dfixed_div(estimated_width, a);
+		wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
 		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
 	}
 }
@@ -1035,58 +942,58 @@
 	WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
 
 	if (mode0 && mode1) {
-		if (rfixed_trunc(wm0.dbpp) > 64)
-			a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+		if (dfixed_trunc(wm0.dbpp) > 64)
+			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
 		else
 			a.full = wm0.num_line_pair.full;
-		if (rfixed_trunc(wm1.dbpp) > 64)
-			b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+		if (dfixed_trunc(wm1.dbpp) > 64)
+			b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
 		else
 			b.full = wm1.num_line_pair.full;
 		a.full += b.full;
-		fill_rate.full = rfixed_div(wm0.sclk, a);
+		fill_rate.full = dfixed_div(wm0.sclk, a);
 		if (wm0.consumption_rate.full > fill_rate.full) {
 			b.full = wm0.consumption_rate.full - fill_rate.full;
-			b.full = rfixed_mul(b, wm0.active_time);
-			a.full = rfixed_const(16);
-			b.full = rfixed_div(b, a);
-			a.full = rfixed_mul(wm0.worst_case_latency,
+			b.full = dfixed_mul(b, wm0.active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm0.worst_case_latency,
 						wm0.consumption_rate);
 			priority_mark02.full = a.full + b.full;
 		} else {
-			a.full = rfixed_mul(wm0.worst_case_latency,
+			a.full = dfixed_mul(wm0.worst_case_latency,
 						wm0.consumption_rate);
-			b.full = rfixed_const(16 * 1000);
-			priority_mark02.full = rfixed_div(a, b);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
 		}
 		if (wm1.consumption_rate.full > fill_rate.full) {
 			b.full = wm1.consumption_rate.full - fill_rate.full;
-			b.full = rfixed_mul(b, wm1.active_time);
-			a.full = rfixed_const(16);
-			b.full = rfixed_div(b, a);
-			a.full = rfixed_mul(wm1.worst_case_latency,
+			b.full = dfixed_mul(b, wm1.active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm1.worst_case_latency,
 						wm1.consumption_rate);
 			priority_mark12.full = a.full + b.full;
 		} else {
-			a.full = rfixed_mul(wm1.worst_case_latency,
+			a.full = dfixed_mul(wm1.worst_case_latency,
 						wm1.consumption_rate);
-			b.full = rfixed_const(16 * 1000);
-			priority_mark12.full = rfixed_div(a, b);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
 		}
 		if (wm0.priority_mark.full > priority_mark02.full)
 			priority_mark02.full = wm0.priority_mark.full;
-		if (rfixed_trunc(priority_mark02) < 0)
+		if (dfixed_trunc(priority_mark02) < 0)
 			priority_mark02.full = 0;
 		if (wm0.priority_mark_max.full > priority_mark02.full)
 			priority_mark02.full = wm0.priority_mark_max.full;
 		if (wm1.priority_mark.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark.full;
-		if (rfixed_trunc(priority_mark12) < 0)
+		if (dfixed_trunc(priority_mark12) < 0)
 			priority_mark12.full = 0;
 		if (wm1.priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark_max.full;
-		d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
-		d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
 		if (rdev->disp_priority == 2) {
 			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
 			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
@@ -1096,32 +1003,32 @@
 		WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
 		WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
 	} else if (mode0) {
-		if (rfixed_trunc(wm0.dbpp) > 64)
-			a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+		if (dfixed_trunc(wm0.dbpp) > 64)
+			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
 		else
 			a.full = wm0.num_line_pair.full;
-		fill_rate.full = rfixed_div(wm0.sclk, a);
+		fill_rate.full = dfixed_div(wm0.sclk, a);
 		if (wm0.consumption_rate.full > fill_rate.full) {
 			b.full = wm0.consumption_rate.full - fill_rate.full;
-			b.full = rfixed_mul(b, wm0.active_time);
-			a.full = rfixed_const(16);
-			b.full = rfixed_div(b, a);
-			a.full = rfixed_mul(wm0.worst_case_latency,
+			b.full = dfixed_mul(b, wm0.active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm0.worst_case_latency,
 						wm0.consumption_rate);
 			priority_mark02.full = a.full + b.full;
 		} else {
-			a.full = rfixed_mul(wm0.worst_case_latency,
+			a.full = dfixed_mul(wm0.worst_case_latency,
 						wm0.consumption_rate);
-			b.full = rfixed_const(16);
-			priority_mark02.full = rfixed_div(a, b);
+			b.full = dfixed_const(16);
+			priority_mark02.full = dfixed_div(a, b);
 		}
 		if (wm0.priority_mark.full > priority_mark02.full)
 			priority_mark02.full = wm0.priority_mark.full;
-		if (rfixed_trunc(priority_mark02) < 0)
+		if (dfixed_trunc(priority_mark02) < 0)
 			priority_mark02.full = 0;
 		if (wm0.priority_mark_max.full > priority_mark02.full)
 			priority_mark02.full = wm0.priority_mark_max.full;
-		d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
 		if (rdev->disp_priority == 2)
 			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
 		WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
@@ -1129,32 +1036,32 @@
 		WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
 		WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
 	} else {
-		if (rfixed_trunc(wm1.dbpp) > 64)
-			a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+		if (dfixed_trunc(wm1.dbpp) > 64)
+			a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
 		else
 			a.full = wm1.num_line_pair.full;
-		fill_rate.full = rfixed_div(wm1.sclk, a);
+		fill_rate.full = dfixed_div(wm1.sclk, a);
 		if (wm1.consumption_rate.full > fill_rate.full) {
 			b.full = wm1.consumption_rate.full - fill_rate.full;
-			b.full = rfixed_mul(b, wm1.active_time);
-			a.full = rfixed_const(16);
-			b.full = rfixed_div(b, a);
-			a.full = rfixed_mul(wm1.worst_case_latency,
+			b.full = dfixed_mul(b, wm1.active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm1.worst_case_latency,
 						wm1.consumption_rate);
 			priority_mark12.full = a.full + b.full;
 		} else {
-			a.full = rfixed_mul(wm1.worst_case_latency,
+			a.full = dfixed_mul(wm1.worst_case_latency,
 						wm1.consumption_rate);
-			b.full = rfixed_const(16 * 1000);
-			priority_mark12.full = rfixed_div(a, b);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
 		}
 		if (wm1.priority_mark.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark.full;
-		if (rfixed_trunc(priority_mark12) < 0)
+		if (dfixed_trunc(priority_mark12) < 0)
 			priority_mark12.full = 0;
 		if (wm1.priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark_max.full;
-		d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
 		if (rdev->disp_priority == 2)
 			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
 		WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index fc216e4..590309a 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -217,6 +217,52 @@
 #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
 
 /* Registers */
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                   (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                   (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                      0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                    (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                    (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                       0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                  (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                  (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                     0xFFFFBFFF
 #define R_0000F8_CONFIG_MEMSIZE                      0x0000F8
 #define   S_0000F8_CONFIG_MEMSIZE(x)                   (((x) & 0xFFFFFFFF) << 0)
 #define   G_0000F8_CONFIG_MEMSIZE(x)                   (((x) >> 0) & 0xFFFFFFFF)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 97958a6..253f24a 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -42,6 +42,10 @@
 static void rv770_gpu_init(struct radeon_device *rdev);
 void rv770_fini(struct radeon_device *rdev);
 
+void rv770_pm_misc(struct radeon_device *rdev)
+{
+
+}
 
 /*
  * GART
@@ -237,7 +241,6 @@
 	WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
 }
 
-
 static int rv770_cp_load_microcode(struct radeon_device *rdev)
 {
 	const __be32 *fw_data;
@@ -272,6 +275,11 @@
 	return 0;
 }
 
+void r700_cp_fini(struct radeon_device *rdev)
+{
+	r700_cp_stop(rdev);
+	radeon_ring_fini(rdev);
+}
 
 /*
  * Core functions
@@ -906,23 +914,12 @@
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	/* FIXME remove this once we support unmappable VRAM */
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-		rdev->mc.real_vram_size = rdev->mc.aper_size;
-	}
 	r600_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
 
 	return 0;
 }
 
-int rv770_gpu_reset(struct radeon_device *rdev)
-{
-	/* FIXME: implement any rv770 specific bits */
-	return r600_gpu_reset(rdev);
-}
-
 static int rv770_startup(struct radeon_device *rdev)
 {
 	int r;
@@ -1094,8 +1091,6 @@
 	r = radeon_clocks_init(rdev);
 	if (r)
 		return r;
-	/* Initialize power management */
-	radeon_pm_init(rdev);
 	/* Fence driver */
 	r = radeon_fence_driver_init(rdev);
 	if (r)
@@ -1132,7 +1127,7 @@
 	r = rv770_startup(rdev);
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
-		r600_cp_fini(rdev);
+		r700_cp_fini(rdev);
 		r600_wb_fini(rdev);
 		r600_irq_fini(rdev);
 		radeon_irq_kms_fini(rdev);
@@ -1164,9 +1159,8 @@
 
 void rv770_fini(struct radeon_device *rdev)
 {
-	radeon_pm_fini(rdev);
 	r600_blit_fini(rdev);
-	r600_cp_fini(rdev);
+	r700_cp_fini(rdev);
 	r600_wb_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bff6fc2..2d0c9ca 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -539,11 +539,10 @@
 {
 	drm_savage_private_t *dev_priv;
 
-	dev_priv = kmalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
+	dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
-	memset(dev_priv, 0, sizeof(drm_savage_private_t));
 	dev->dev_private = (void *)dev_priv;
 
 	dev_priv->chipset = (enum savage_family)chipset;
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 1e138f5..4256e20 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,6 @@
 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
-	ttm_object.o ttm_lock.o ttm_execbuf_util.o
+	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0e3754a..555ebb1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -79,8 +79,6 @@
 	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
 	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
 	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
-	printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
-	printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
 	printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
 	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
 		man->available_caching);
@@ -357,7 +355,8 @@
 
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem,
-				  bool evict, bool interruptible, bool no_wait)
+				  bool evict, bool interruptible,
+				  bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +401,12 @@
 
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 	else if (bdev->driver->move)
 		ret = bdev->driver->move(bo, evict, interruptible,
-					 no_wait, mem);
+					 no_wait_reserve, no_wait_gpu, mem);
 	else
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 
 	if (ret)
 		goto out_err;
@@ -605,8 +604,22 @@
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
+int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
+{
+	return cancel_delayed_work_sync(&bdev->wq);
+}
+EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
+
+void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+{
+	if (resched)
+		schedule_delayed_work(&bdev->wq,
+				      ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
+
 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-			bool no_wait)
+			bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +628,7 @@
 	int ret = 0;
 
 	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bo->lock);
 
 	if (unlikely(ret != 0)) {
@@ -631,6 +644,7 @@
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
+	evict_mem.bus.io_reserved = false;
 
 	placement.fpfn = 0;
 	placement.lpfn = 0;
@@ -638,7 +652,7 @@
 	placement.num_busy_placement = 0;
 	bdev->driver->evict_flags(bo, &placement);
 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-				no_wait);
+				no_wait_reserve, no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS) {
 			printk(KERN_ERR TTM_PFX
@@ -650,7 +664,7 @@
 	}
 
 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-				     no_wait);
+				     no_wait_reserve, no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS)
 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +684,8 @@
 
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 				uint32_t mem_type,
-				bool interruptible, bool no_wait)
+				bool interruptible, bool no_wait_reserve,
+				bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +702,11 @@
 	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);
 
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
 
 	if (unlikely(ret == -EBUSY)) {
 		spin_unlock(&glob->lru_lock);
-		if (likely(!no_wait))
+		if (likely(!no_wait_gpu))
 			ret = ttm_bo_wait_unreserved(bo, interruptible);
 
 		kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +728,7 @@
 	while (put_count--)
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-	ret = ttm_bo_evict(bo, interruptible, no_wait);
+	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
 	ttm_bo_unreserve(bo);
 
 	kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +779,9 @@
 					uint32_t mem_type,
 					struct ttm_placement *placement,
 					struct ttm_mem_reg *mem,
-					bool interruptible, bool no_wait)
+					bool interruptible,
+					bool no_wait_reserve,
+					bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +802,7 @@
 		}
 		spin_unlock(&glob->lru_lock);
 		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
-						no_wait);
+						no_wait_reserve, no_wait_gpu);
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
@@ -855,7 +872,8 @@
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
 			struct ttm_mem_reg *mem,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
@@ -952,7 +970,7 @@
 		}
 
 		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
-						interruptible, no_wait);
+						interruptible, no_wait_reserve, no_wait_gpu);
 		if (ret == 0 && mem->mm_node) {
 			mem->placement = cur_flags;
 			mem->mm_node->private = bo;
@@ -978,7 +996,8 @@
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
@@ -992,20 +1011,21 @@
 	 * instead of doing it here.
 	 */
 	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bo->lock);
 	if (ret)
 		return ret;
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
+	mem.bus.io_reserved = false;
 	/*
 	 * Determine where to move the buffer.
 	 */
-	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		goto out_unlock;
-	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
 	if (ret && mem.mm_node) {
 		spin_lock(&glob->lru_lock);
@@ -1039,7 +1059,8 @@
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	int ret;
 
@@ -1054,7 +1075,7 @@
 	 */
 	ret = ttm_bo_mem_compat(placement, &bo->mem);
 	if (ret < 0) {
-		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
 		if (ret)
 			return ret;
 	} else {
@@ -1153,6 +1174,7 @@
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
+	bo->mem.bus.io_reserved = false;
 	bo->buffer_start = buffer_start & PAGE_MASK;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1175,7 +1197,7 @@
 			goto out_err;
 	}
 
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
 	if (ret)
 		goto out_err;
 
@@ -1249,7 +1271,7 @@
 	spin_lock(&glob->lru_lock);
 	while (!list_empty(&man->lru)) {
 		spin_unlock(&glob->lru_lock);
-		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
 		if (ret) {
 			if (allow_errors) {
 				return ret;
@@ -1553,26 +1575,6 @@
 	return true;
 }
 
-int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
-		      struct ttm_mem_reg *mem,
-		      unsigned long *bus_base,
-		      unsigned long *bus_offset, unsigned long *bus_size)
-{
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
-	*bus_size = 0;
-	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-		return -EINVAL;
-
-	if (ttm_mem_reg_is_pci(bdev, mem)) {
-		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
-		*bus_size = mem->num_pages << PAGE_SHIFT;
-		*bus_base = man->io_offset;
-	}
-
-	return 0;
-}
-
 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -1581,8 +1583,8 @@
 
 	if (!bdev->dev_mapping)
 		return;
-
 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+	ttm_mem_io_free(bdev, &bo->mem);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
@@ -1811,7 +1813,7 @@
 		evict_mem.mem_type = TTM_PL_SYSTEM;
 
 		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
-					     false, false);
+					     false, false, false);
 		if (unlikely(ret != 0))
 			goto out;
 	}
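
The ttm_bo.c changes above split the old single no_wait argument into separate no_wait_reserve and no_wait_gpu flags throughout the eviction and validation paths. As a caller-side sketch only (illustrative names; bo and placement are assumed to already exist), the signature change looks roughly like this:

    /* Old call: one flag covered both the reserve wait and the GPU wait. */
    ret = ttm_bo_validate(bo, &placement, interruptible, no_wait);

    /* New call: the two waits are passed independently; passing false for
     * both preserves the previous blocking behaviour. */
    ret = ttm_bo_validate(bo, &placement, interruptible,
                          false /* no_wait_reserve */,
                          false /* no_wait_gpu */);

The vmwgfx hunks later in this series update their call sites in exactly this way.
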
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d764e82..13012a1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -50,7 +50,8 @@
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		    bool evict, bool no_wait_reserve,
+		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
@@ -81,30 +82,51 @@
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	int ret;
+
+	if (!mem->bus.io_reserved) {
+		mem->bus.io_reserved = true;
+		ret = bdev->driver->io_mem_reserve(bdev, mem);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+	return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	if (bdev->driver->io_mem_reserve) {
+		if (mem->bus.io_reserved) {
+			mem->bus.io_reserved = false;
+			bdev->driver->io_mem_free(bdev, mem);
+		}
+	}
+}
+
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	unsigned long bus_offset;
-	unsigned long bus_size;
-	unsigned long bus_base;
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
-	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
-	if (ret || bus_size == 0)
+	ret = ttm_mem_io_reserve(bdev, mem);
+	if (ret || !mem->bus.is_iomem)
 		return ret;
 
-	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
-		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
-	else {
+	if (mem->bus.addr) {
+		addr = mem->bus.addr;
+	} else {
 		if (mem->placement & TTM_PL_FLAG_WC)
-			addr = ioremap_wc(bus_base + bus_offset, bus_size);
+			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
 		else
-			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
-		if (!addr)
+			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+		if (!addr) {
+			ttm_mem_io_free(bdev, mem);
 			return -ENOMEM;
+		}
 	}
 	*virtual = addr;
 	return 0;
@@ -117,8 +139,9 @@
 
 	man = &bdev->man[mem->mem_type];
 
-	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
+	ttm_mem_io_free(bdev, mem);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -208,7 +231,8 @@
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -369,26 +393,23 @@
 EXPORT_SYMBOL(ttm_io_prot);
 
 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
-			  unsigned long bus_base,
-			  unsigned long bus_offset,
-			  unsigned long bus_size,
+			  unsigned long offset,
+			  unsigned long size,
 			  struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_reg *mem = &bo->mem;
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 
-	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+	if (bo->mem.bus.addr) {
 		map->bo_kmap_type = ttm_bo_map_premapped;
-		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
 	} else {
 		map->bo_kmap_type = ttm_bo_map_iomap;
 		if (mem->placement & TTM_PL_FLAG_WC)
-			map->virtual = ioremap_wc(bus_base + bus_offset,
-						  bus_size);
+			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+						  size);
 		else
-			map->virtual = ioremap_nocache(bus_base + bus_offset,
-						       bus_size);
+			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+						       size);
 	}
 	return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -441,13 +462,12 @@
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
+	unsigned long offset, size;
 	int ret;
-	unsigned long bus_base;
-	unsigned long bus_offset;
-	unsigned long bus_size;
 
 	BUG_ON(!list_empty(&bo->swap));
 	map->virtual = NULL;
+	map->bo = bo;
 	if (num_pages > bo->num_pages)
 		return -EINVAL;
 	if (start_page > bo->num_pages)
@@ -456,16 +476,15 @@
 	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
 		return -EPERM;
 #endif
-	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
-				&bus_offset, &bus_size);
+	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
 	if (ret)
 		return ret;
-	if (bus_size == 0) {
+	if (!bo->mem.bus.is_iomem) {
 		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
 	} else {
-		bus_offset += start_page << PAGE_SHIFT;
-		bus_size = num_pages << PAGE_SHIFT;
-		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+		offset = start_page << PAGE_SHIFT;
+		size = num_pages << PAGE_SHIFT;
+		return ttm_bo_ioremap(bo, offset, size, map);
 	}
 }
 EXPORT_SYMBOL(ttm_bo_kmap);
@@ -477,6 +496,7 @@
 	switch (map->bo_kmap_type) {
 	case ttm_bo_map_iomap:
 		iounmap(map->virtual);
+		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
 		break;
 	case ttm_bo_map_vmap:
 		vunmap(map->virtual);
@@ -494,39 +514,11 @@
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
-		    unsigned long dst_offset,
-		    unsigned long *pfn, pgprot_t *prot)
-{
-	struct ttm_mem_reg *mem = &bo->mem;
-	struct ttm_bo_device *bdev = bo->bdev;
-	unsigned long bus_offset;
-	unsigned long bus_size;
-	unsigned long bus_base;
-	int ret;
-	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
-			&bus_size);
-	if (ret)
-		return -EINVAL;
-	if (bus_size != 0)
-		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
-	else
-		if (!bo->ttm)
-			return -EINVAL;
-		else
-			*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
-							   dst_offset >>
-							   PAGE_SHIFT));
-	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
-		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
-	return 0;
-}
-
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      void *sync_obj,
 			      void *sync_obj_arg,
-			      bool evict, bool no_wait,
+			      bool evict, bool no_wait_reserve,
+			      bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
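
The new ttm_mem_io_reserve()/ttm_mem_io_free() pair replaces the removed ttm_bo_pci_offset(): drivers now describe the aperture through the mem->bus fields and TTM brackets every mapping with a reserve/free. A rough sketch of the calling pattern (mirroring ttm_mem_reg_ioremap() above; declarations and error handling abbreviated):

    ret = ttm_mem_io_reserve(bdev, &bo->mem);
    if (ret)
            return ret;
    if (bo->mem.bus.is_iomem)
            virt = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset,
                              bo->mem.bus.size);
    /* ... use the mapping ... */
    iounmap(virt);
    ttm_mem_io_free(bdev, &bo->mem);
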
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 668dbe8..fe6cb77 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -74,9 +74,6 @@
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
 	    vma->vm_private_data;
 	struct ttm_bo_device *bdev = bo->bdev;
-	unsigned long bus_base;
-	unsigned long bus_offset;
-	unsigned long bus_size;
 	unsigned long page_offset;
 	unsigned long page_last;
 	unsigned long pfn;
@@ -84,7 +81,6 @@
 	struct page *page;
 	int ret;
 	int i;
-	bool is_iomem;
 	unsigned long address = (unsigned long)vmf->virtual_address;
 	int retval = VM_FAULT_NOPAGE;
 
@@ -101,8 +97,21 @@
 		return VM_FAULT_NOPAGE;
 	}
 
-	if (bdev->driver->fault_reserve_notify)
-		bdev->driver->fault_reserve_notify(bo);
+	if (bdev->driver->fault_reserve_notify) {
+		ret = bdev->driver->fault_reserve_notify(bo);
+		switch (ret) {
+		case 0:
+			break;
+		case -EBUSY:
+			set_need_resched();
+		case -ERESTARTSYS:
+			retval = VM_FAULT_NOPAGE;
+			goto out_unlock;
+		default:
+			retval = VM_FAULT_SIGBUS;
+			goto out_unlock;
+		}
+	}
 
 	/*
 	 * Wait for buffer data in transit, due to a pipelined
@@ -122,15 +131,12 @@
 		spin_unlock(&bo->lock);
 
 
-	ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
-				&bus_size);
-	if (unlikely(ret != 0)) {
+	ret = ttm_mem_io_reserve(bdev, &bo->mem);
+	if (ret) {
 		retval = VM_FAULT_SIGBUS;
 		goto out_unlock;
 	}
 
-	is_iomem = (bus_size != 0);
-
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
 	    bo->vm_node->start - vma->vm_pgoff;
 	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
@@ -154,8 +160,7 @@
 	 * vma->vm_page_prot when the object changes caching policy, with
 	 * the correct locks held.
 	 */
-
-	if (is_iomem) {
+	if (bo->mem.bus.is_iomem) {
 		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
 						vma->vm_page_prot);
 	} else {
@@ -171,10 +176,8 @@
 	 */
 
 	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
-
-		if (is_iomem)
-			pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
-			    page_offset;
+		if (bo->mem.bus.is_iomem)
+			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
 		else {
 			page = ttm_tt_get_page(ttm, page_offset);
 			if (unlikely(!page && i == 0)) {
@@ -198,7 +201,6 @@
 			retval =
 			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
 			goto out_unlock;
-
 		}
 
 		address += PAGE_SIZE;
@@ -221,8 +223,7 @@
 
 static void ttm_bo_vm_close(struct vm_area_struct *vma)
 {
-	struct ttm_buffer_object *bo =
-	    (struct ttm_buffer_object *)vma->vm_private_data;
+	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
 
 	ttm_bo_unref(&bo);
 	vma->vm_private_data = NULL;
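
With the fault handler now interpreting the return value of fault_reserve_notify(), a driver hook can ask TTM to retry or fail the fault. A hypothetical implementation (the function and helper names are illustrative, not taken from the patch):

    static int foo_fault_reserve_notify(struct ttm_buffer_object *bo)
    {
            if (foo_bo_is_busy(bo))         /* e.g. a move is still in flight */
                    return -EBUSY;          /* handler returns VM_FAULT_NOPAGE */
            return 0;                       /* proceed with the fault */
    }

Any other negative value makes the handler return VM_FAULT_SIGBUS, as the switch statement added above shows.
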
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 801b702..e70ddd8 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -27,6 +27,7 @@
 
 #include "ttm/ttm_memory.h"
 #include "ttm/ttm_module.h"
+#include "ttm/ttm_page_alloc.h"
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
@@ -393,6 +394,7 @@
 		       "Zone %7s: Available graphics memory: %llu kiB.\n",
 		       zone->name, (unsigned long long) zone->max_mem >> 10);
 	}
+	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
 	return 0;
 out_no_zone:
 	ttm_mem_global_release(glob);
@@ -405,6 +407,9 @@
 	unsigned int i;
 	struct ttm_mem_zone *zone;
 
+	/* let the page allocator first stop the shrink work. */
+	ttm_page_alloc_fini();
+
 	flush_workqueue(glob->swap_queue);
 	destroy_workqueue(glob->swap_queue);
 	glob->swap_queue = NULL;
@@ -412,7 +417,7 @@
 		zone = glob->zones[i];
 		kobject_del(&zone->kobj);
 		kobject_put(&zone->kobj);
-	}
+	}
 	kobject_del(&glob->kobj);
 	kobject_put(&glob->kobj);
 }
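
For a sense of scale, the pool allocator is initialized with half of the kernel zone as its page budget: glob->zone_kernel->max_mem / (2 * PAGE_SIZE). With illustrative numbers, a 2 GiB kernel zone and 4 KiB pages give 2 GiB / 8 KiB = 262144 pages, i.e. at most 1 GiB of pooled pages.
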
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
new file mode 100644
index 0000000..0d9a42c
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Jerome Glisse <jglisse@redhat.com>
+ *          Pauli Nieminen <suokkos@gmail.com>
+ */
+
+/* simple list based uncached page pool
+ * - Pool collects recently freed pages for reuse
+ * - Use page->lru to keep a free list
+ * - doesn't track currently in use pages
+ */
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+
+#include <asm/atomic.h>
+#include <asm/agp.h>
+
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
+
+
+#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION		16
+#define FREE_ALL_PAGES			(~0U)
+/* times are in msecs */
+#define PAGE_FREE_INTERVAL		1000
+
+/**
+ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
+ *
+ * @lock: Protects the shared pool from concurrent access. Must be used with
+ * irqsave/irqrestore variants because the pool allocator may be called from
+ * delayed work.
+ * @fill_lock: Prevent concurrent calls to fill.
+ * @list: Pool of free uc/wc pages for fast reuse.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @npages: Number of pages in pool.
+ */
+struct ttm_page_pool {
+	spinlock_t		lock;
+	bool			fill_lock;
+	struct list_head	list;
+	int			gfp_flags;
+	unsigned		npages;
+	char			*name;
+	unsigned long		nfrees;
+	unsigned long		nrefills;
+};
+
+/**
+ * Limits for the pool. They are handled without locks because the only place
+ * they may change is the sysfs store. They won't have an immediate effect
+ * anyway, so forcing serialization of access to them is pointless.
+ */
+
+struct ttm_pool_opts {
+	unsigned	alloc_size;
+	unsigned	max_size;
+	unsigned	small;
+};
+
+#define NUM_POOLS 4
+
+/**
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * Manager is read only object for pool code so it doesn't need locking.
+ *
+ * @free_interval: minimum number of jiffies between freeing pages from pool.
+ * @page_alloc_inited: reference counting for pool allocation.
+ * @work: Work that is used to shrink the pool. Work is only run when there
+ * are some pages to free.
+ * @small_allocation: Limit, in number of pages, below which an allocation is
+ * considered small.
+ *
+ * @pools: All pool objects in use.
+ **/
+struct ttm_pool_manager {
+	struct kobject		kobj;
+	struct shrinker		mm_shrink;
+	atomic_t		page_alloc_inited;
+	struct ttm_pool_opts	options;
+
+	union {
+		struct ttm_page_pool	pools[NUM_POOLS];
+		struct {
+			struct ttm_page_pool	wc_pool;
+			struct ttm_page_pool	uc_pool;
+			struct ttm_page_pool	wc_pool_dma32;
+			struct ttm_page_pool	uc_pool_dma32;
+		} ;
+	};
+};
+
+static struct attribute ttm_page_pool_max = {
+	.name = "pool_max_size",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+	.name = "pool_small_allocation",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+	.name = "pool_allocation_size",
+	.mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+	&ttm_page_pool_max,
+	&ttm_page_pool_small,
+	&ttm_page_pool_alloc_size,
+	NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	(void)m;
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj,
+		struct attribute *attr, const char *buffer, size_t size)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	int chars;
+	unsigned val;
+	chars = sscanf(buffer, "%u", &val);
+	if (chars == 0)
+		return size;
+
+	/* Convert kb to number of pages */
+	val = val / (PAGE_SIZE >> 10);
+
+	if (attr == &ttm_page_pool_max)
+		m->options.max_size = val;
+	else if (attr == &ttm_page_pool_small)
+		m->options.small = val;
+	else if (attr == &ttm_page_pool_alloc_size) {
+		if (val > NUM_PAGES_TO_ALLOC*8) {
+			printk(KERN_ERR "[ttm] Setting allocation size to %lu "
+					"is not allowed. Recommended size is "
+					"%lu\n",
+					NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+					NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			return size;
+		} else if (val > NUM_PAGES_TO_ALLOC) {
+			printk(KERN_WARNING "[ttm] Setting allocation size to "
+					"larger than %lu is not recommended.\n",
+					NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+		}
+		m->options.alloc_size = val;
+	}
+
+	return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj,
+		struct attribute *attr, char *buffer)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	unsigned val = 0;
+
+	if (attr == &ttm_page_pool_max)
+		val = m->options.max_size;
+	else if (attr == &ttm_page_pool_small)
+		val = m->options.small;
+	else if (attr == &ttm_page_pool_alloc_size)
+		val = m->options.alloc_size;
+
+	val = val * (PAGE_SIZE >> 10);
+
+	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+	.show = &ttm_pool_show,
+	.store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+	.release = &ttm_pool_kobj_release,
+	.sysfs_ops = &ttm_pool_sysfs_ops,
+	.default_attrs = ttm_pool_attrs,
+};
+
+static struct ttm_pool_manager _manager = {
+	.page_alloc_inited	= ATOMIC_INIT(0)
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		unmap_page_from_agp(pages[i]);
+#endif
+	return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+#endif
+	return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+#endif
+	return 0;
+}
+#endif
+
+/**
+ * Select the right pool for the requested caching state and ttm flags. */
+static struct ttm_page_pool *ttm_get_pool(int flags,
+		enum ttm_caching_state cstate)
+{
+	int pool_index;
+
+	if (cstate == tt_cached)
+		return NULL;
+
+	if (cstate == tt_wc)
+		pool_index = 0x0;
+	else
+		pool_index = 0x1;
+
+	if (flags & TTM_PAGE_FLAG_DMA32)
+		pool_index |= 0x2;
+
+	return &_manager.pools[pool_index];
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_pages_put(struct page *pages[], unsigned npages)
+{
+	unsigned i;
+	if (set_pages_array_wb(pages, npages))
+		printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+				npages);
+	for (i = 0; i < npages; ++i)
+		__free_page(pages[i]);
+}
+
+static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+		unsigned freed_pages)
+{
+	pool->npages -= freed_pages;
+	pool->nfrees += freed_pages;
+}
+
+/**
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * number of pages in one go.
+ *
+ * @pool: to free the pages from
+ * @nr_free: number of pages to free; pass FREE_ALL_PAGES to free every page in the pool
+ **/
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+{
+	unsigned long irq_flags;
+	struct page *p;
+	struct page **pages_to_free;
+	unsigned freed_pages = 0,
+		 npages_to_free = nr_free;
+
+	if (NUM_PAGES_TO_ALLOC < nr_free)
+		npages_to_free = NUM_PAGES_TO_ALLOC;
+
+	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+			GFP_KERNEL);
+	if (!pages_to_free) {
+		printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+		return 0;
+	}
+
+restart:
+	spin_lock_irqsave(&pool->lock, irq_flags);
+
+	list_for_each_entry_reverse(p, &pool->list, lru) {
+		if (freed_pages >= npages_to_free)
+			break;
+
+		pages_to_free[freed_pages++] = p;
+		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+			/* remove range of pages from the pool */
+			__list_del(p->lru.prev, &pool->list);
+
+			ttm_pool_update_free_locked(pool, freed_pages);
+			/**
+			 * Because changing page caching is costly
+			 * we unlock the pool to prevent stalling.
+			 */
+			spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+			ttm_pages_put(pages_to_free, freed_pages);
+			if (likely(nr_free != FREE_ALL_PAGES))
+				nr_free -= freed_pages;
+
+			if (NUM_PAGES_TO_ALLOC >= nr_free)
+				npages_to_free = nr_free;
+			else
+				npages_to_free = NUM_PAGES_TO_ALLOC;
+
+			freed_pages = 0;
+
+			/* free all so restart the processing */
+			if (nr_free)
+				goto restart;
+
+			/* Not allowed to fall through or break because
+			 * the code following the loop expects the pool lock
+			 * to be held, while we have already dropped it here.
+			 */
+			goto out;
+
+		}
+	}
+
+	/* remove range of pages from the pool */
+	if (freed_pages) {
+		__list_del(&p->lru, &pool->list);
+
+		ttm_pool_update_free_locked(pool, freed_pages);
+		nr_free -= freed_pages;
+	}
+
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+	if (freed_pages)
+		ttm_pages_put(pages_to_free, freed_pages);
+out:
+	kfree(pages_to_free);
+	return nr_free;
+}
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_pool_get_num_unused_pages(void)
+{
+	unsigned i;
+	int total = 0;
+	for (i = 0; i < NUM_POOLS; ++i)
+		total += _manager.pools[i].npages;
+
+	return total;
+}
+
+/**
+ * Callback for mm to request the pool to reduce the number of pages held.
+ */
+static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
+{
+	static atomic_t start_pool = ATOMIC_INIT(0);
+	unsigned i;
+	unsigned pool_offset = atomic_add_return(1, &start_pool);
+	struct ttm_page_pool *pool;
+
+	pool_offset = pool_offset % NUM_POOLS;
+	/* select start pool in round robin fashion */
+	for (i = 0; i < NUM_POOLS; ++i) {
+		unsigned nr_free = shrink_pages;
+		if (shrink_pages == 0)
+			break;
+		pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
+		shrink_pages = ttm_page_pool_free(pool, nr_free);
+	}
+	/* return estimated number of unused pages in pool */
+	return ttm_pool_get_num_unused_pages();
+}
+
+static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+	manager->mm_shrink.seeks = 1;
+	register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+	unregister_shrinker(&manager->mm_shrink);
+}
+
+static int ttm_set_pages_caching(struct page **pages,
+		enum ttm_caching_state cstate, unsigned cpages)
+{
+	int r = 0;
+	/* Set page caching */
+	switch (cstate) {
+	case tt_uncached:
+		r = set_pages_array_uc(pages, cpages);
+		if (r)
+			printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
+					cpages);
+		break;
+	case tt_wc:
+		r = set_pages_array_wc(pages, cpages);
+		if (r)
+			printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
+					cpages);
+		break;
+	default:
+		break;
+	}
+	return r;
+}
+
+/**
+ * Free the pages that failed to change caching state. If any pages have
+ * already changed their caching state, put them back into the pool.
+ */
+static void ttm_handle_caching_state_failure(struct list_head *pages,
+		int ttm_flags, enum ttm_caching_state cstate,
+		struct page **failed_pages, unsigned cpages)
+{
+	unsigned i;
+	/* Failed pages have to be freed */
+	for (i = 0; i < cpages; ++i) {
+		list_del(&failed_pages[i]->lru);
+		__free_page(failed_pages[i]);
+	}
+}
+
+/**
+ * Allocate new pages with correct caching.
+ *
+ * This function is reentrant if the caller updates count depending on the
+ * number of pages returned in the pages list.
+ */
+static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+	struct page **caching_array;
+	struct page *p;
+	int r = 0;
+	unsigned i, cpages;
+	unsigned max_cpages = min(count,
+			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+	/* allocate array for page caching change */
+	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+	if (!caching_array) {
+		printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+		return -ENOMEM;
+	}
+
+	for (i = 0, cpages = 0; i < count; ++i) {
+		p = alloc_page(gfp_flags);
+
+		if (!p) {
+			printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+
+			/* store already allocated pages in the pool after
+			 * setting the caching state */
+			if (cpages) {
+				r = ttm_set_pages_caching(caching_array, cstate, cpages);
+				if (r)
+					ttm_handle_caching_state_failure(pages,
+						ttm_flags, cstate,
+						caching_array, cpages);
+			}
+			r = -ENOMEM;
+			goto out;
+		}
+
+#ifdef CONFIG_HIGHMEM
+		/* gfp flags of a highmem page should never be dma32, so we
+		 * should be fine in that case
+		 */
+		if (!PageHighMem(p))
+#endif
+		{
+			caching_array[cpages++] = p;
+			if (cpages == max_cpages) {
+
+				r = ttm_set_pages_caching(caching_array,
+						cstate, cpages);
+				if (r) {
+					ttm_handle_caching_state_failure(pages,
+						ttm_flags, cstate,
+						caching_array, cpages);
+					goto out;
+				}
+				cpages = 0;
+			}
+		}
+
+		list_add(&p->lru, pages);
+	}
+
+	if (cpages) {
+		r = ttm_set_pages_caching(caching_array, cstate, cpages);
+		if (r)
+			ttm_handle_caching_state_failure(pages,
+					ttm_flags, cstate,
+					caching_array, cpages);
+	}
+out:
+	kfree(caching_array);
+
+	return r;
+}
+
+/**
+ * Fill the given pool if there aren't enough pages and the requested number
+ * of pages is small.
+ */
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
+		unsigned long *irq_flags)
+{
+	struct page *p;
+	int r;
+	unsigned cpages = 0;
+	/**
+	 * Only allow one pool fill operation at a time.
+	 * If the pool doesn't have enough pages for the allocation, new pages
+	 * are allocated from outside of the pool.
+	 */
+	if (pool->fill_lock)
+		return;
+
+	pool->fill_lock = true;
+
+	/* If the allocation request is small and there are not enough
+	 * pages in the pool, we fill the pool first */
+	if (count < _manager.options.small
+		&& count > pool->npages) {
+		struct list_head new_pages;
+		unsigned alloc_size = _manager.options.alloc_size;
+
+		/**
+		 * Can't change page caching if in irqsave context. We have to
+		 * drop the pool->lock.
+		 */
+		spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+		INIT_LIST_HEAD(&new_pages);
+		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
+				cstate,	alloc_size);
+		spin_lock_irqsave(&pool->lock, *irq_flags);
+
+		if (!r) {
+			list_splice(&new_pages, &pool->list);
+			++pool->nrefills;
+			pool->npages += alloc_size;
+		} else {
+			printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+			/* If we have any pages left put them to the pool. */
+			list_for_each_entry(p, &pool->list, lru) {
+				++cpages;
+			}
+			list_splice(&new_pages, &pool->list);
+			pool->npages += cpages;
+		}
+
+	}
+	pool->fill_lock = false;
+}
+
+/**
+ * Cut 'count' number of pages from the pool and put them on the return list.
+ *
+ * @return count of pages still to allocate to fill the request.
+ */
+static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+		struct list_head *pages, int ttm_flags,
+		enum ttm_caching_state cstate, unsigned count)
+{
+	unsigned long irq_flags;
+	struct list_head *p;
+	unsigned i;
+
+	spin_lock_irqsave(&pool->lock, irq_flags);
+	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+
+	if (count >= pool->npages) {
+		/* take all pages from the pool */
+		list_splice_init(&pool->list, pages);
+		count -= pool->npages;
+		pool->npages = 0;
+		goto out;
+	}
+	/* Find the last page to include for the requested number of pages.
+	 * Search from whichever end of the list is closer to halve the search space. */
+	if (count <= pool->npages/2) {
+		i = 0;
+		list_for_each(p, &pool->list) {
+			if (++i == count)
+				break;
+		}
+	} else {
+		i = pool->npages + 1;
+		list_for_each_prev(p, &pool->list) {
+			if (--i == count)
+				break;
+		}
+	}
+	/* Cut count number of pages from pool */
+	list_cut_position(pages, &pool->list, p);
+	pool->npages -= count;
+	count = 0;
+out:
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+	return count;
+}
+
+/*
+ * On success the pages list will hold 'count' correctly
+ * cached pages.
+ */
+int ttm_get_pages(struct list_head *pages, int flags,
+		enum ttm_caching_state cstate, unsigned count)
+{
+	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	struct page *p = NULL;
+	int gfp_flags = 0;
+	int r;
+
+	/* set zero flag for page allocation if required */
+	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+		gfp_flags |= __GFP_ZERO;
+
+	/* No pool for cached pages */
+	if (pool == NULL) {
+		if (flags & TTM_PAGE_FLAG_DMA32)
+			gfp_flags |= GFP_DMA32;
+		else
+			gfp_flags |= __GFP_HIGHMEM;
+
+		for (r = 0; r < count; ++r) {
+			p = alloc_page(gfp_flags);
+			if (!p) {
+
+				printk(KERN_ERR "[ttm] unable to allocate page.");
+				return -ENOMEM;
+			}
+
+			list_add(&p->lru, pages);
+		}
+		return 0;
+	}
+
+
+	/* combine zero flag to pool flags */
+	gfp_flags |= pool->gfp_flags;
+
+	/* First we take pages from the pool */
+	count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+
+	/* clear the pages coming from the pool if requested */
+	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+		list_for_each_entry(p, pages, lru) {
+			clear_page(page_address(p));
+		}
+	}
+
+	/* If the pool didn't have enough pages, allocate new ones. */
+	if (count > 0) {
+		/* ttm_alloc_new_pages doesn't reference pool so we can run
+		 * multiple requests in parallel.
+		 **/
+		r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+		if (r) {
+			/* If there are any pages in the list, put them back into
+			 * the pool. */
+			printk(KERN_ERR "[ttm] Failed to allocate extra pages "
+					"for large request.");
+			ttm_put_pages(pages, 0, flags, cstate);
+			return r;
+		}
+	}
+
+
+	return 0;
+}
+
+/* Put all pages in the pages list into the correct pool to await reuse */
+void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+		enum ttm_caching_state cstate)
+{
+	unsigned long irq_flags;
+	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	struct page *p, *tmp;
+
+	if (pool == NULL) {
+		/* No pool for this memory type so free the pages */
+
+		list_for_each_entry_safe(p, tmp, pages, lru) {
+			__free_page(p);
+		}
+		/* Make the pages list empty */
+		INIT_LIST_HEAD(pages);
+		return;
+	}
+	if (page_count == 0) {
+		list_for_each_entry_safe(p, tmp, pages, lru) {
+			++page_count;
+		}
+	}
+
+	spin_lock_irqsave(&pool->lock, irq_flags);
+	list_splice_init(pages, &pool->list);
+	pool->npages += page_count;
+	/* Check that we don't go over the pool limit */
+	page_count = 0;
+	if (pool->npages > _manager.options.max_size) {
+		page_count = pool->npages - _manager.options.max_size;
+		/* free at least NUM_PAGES_TO_ALLOC number of pages
+		 * to reduce calls to set_memory_wb */
+		if (page_count < NUM_PAGES_TO_ALLOC)
+			page_count = NUM_PAGES_TO_ALLOC;
+	}
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+	if (page_count)
+		ttm_page_pool_free(pool, page_count);
+}
+
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+		char *name)
+{
+	spin_lock_init(&pool->lock);
+	pool->fill_lock = false;
+	INIT_LIST_HEAD(&pool->list);
+	pool->npages = pool->nfrees = 0;
+	pool->gfp_flags = flags;
+	pool->name = name;
+}
+
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+	int ret;
+	if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
+		return 0;
+
+	printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+
+	ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
+
+	ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");
+
+	ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
+			"wc dma");
+
+	ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
+			"uc dma");
+
+	_manager.options.max_size = max_pages;
+	_manager.options.small = SMALL_ALLOCATION;
+	_manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+	kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
+	ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
+	if (unlikely(ret != 0)) {
+		kobject_put(&_manager.kobj);
+		return ret;
+	}
+
+	ttm_pool_mm_shrink_init(&_manager);
+
+	return 0;
+}
+
+void ttm_page_alloc_fini()
+{
+	int i;
+
+	if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
+		return;
+
+	printk(KERN_INFO "[ttm] Finalizing pool allocator.\n");
+	ttm_pool_mm_shrink_fini(&_manager);
+
+	for (i = 0; i < NUM_POOLS; ++i)
+		ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
+
+	kobject_put(&_manager.kobj);
+}
+
+int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+	struct ttm_page_pool *p;
+	unsigned i;
+	char *h[] = {"pool", "refills", "pages freed", "size"};
+	if (atomic_read(&_manager.page_alloc_inited) == 0) {
+		seq_printf(m, "No pool allocator running.\n");
+		return 0;
+	}
+	seq_printf(m, "%6s %12s %13s %8s\n",
+			h[0], h[1], h[2], h[3]);
+	for (i = 0; i < NUM_POOLS; ++i) {
+		p = &_manager.pools[i];
+
+		seq_printf(m, "%6s %12ld %13ld %8d\n",
+				p->name, p->nrefills,
+				p->nfrees, p->npages);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ttm_page_alloc_debugfs);
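
Callers hand pages around on a list_head threaded through page->lru rather than as single struct page pointers. A minimal usage sketch of the new allocator API (flags and cstate are assumed to hold valid TTM page flags and caching state):

    struct list_head pages;
    struct page *p;

    INIT_LIST_HEAD(&pages);
    if (ttm_get_pages(&pages, flags, cstate, 1) == 0) {
            p = list_first_entry(&pages, struct page, lru);
            /* ... use the page ... */
            ttm_put_pages(&pages, 1, flags, cstate);
    }

The ttm_tt.c changes below consume the allocator in exactly this way.
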
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d5fd5b8..a7bab87 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -39,6 +39,7 @@
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
 
 static int ttm_tt_swapin(struct ttm_tt *ttm);
 
@@ -56,21 +57,6 @@
 	ttm->pages = NULL;
 }
 
-static struct page *ttm_tt_alloc_page(unsigned page_flags)
-{
-	gfp_t gfp_flags = GFP_USER;
-
-	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-		gfp_flags |= __GFP_ZERO;
-
-	if (page_flags & TTM_PAGE_FLAG_DMA32)
-		gfp_flags |= __GFP_DMA32;
-	else
-		gfp_flags |= __GFP_HIGHMEM;
-
-	return alloc_page(gfp_flags);
-}
-
 static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 {
 	int write;
@@ -111,15 +97,21 @@
 static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 {
 	struct page *p;
+	struct list_head h;
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	int ret;
 
 	while (NULL == (p = ttm->pages[index])) {
-		p = ttm_tt_alloc_page(ttm->page_flags);
 
-		if (!p)
+		INIT_LIST_HEAD(&h);
+
+		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+
+		if (ret != 0)
 			return NULL;
 
+		p = list_first_entry(&h, struct page, lru);
+
 		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
 		if (unlikely(ret != 0))
 			goto out_err;
@@ -228,10 +220,10 @@
 	if (ttm->caching_state == c_state)
 		return 0;
 
-	if (c_state != tt_cached) {
-		ret = ttm_tt_populate(ttm);
-		if (unlikely(ret != 0))
-			return ret;
+	if (ttm->state == tt_unpopulated) {
+		/* Change caching but don't populate */
+		ttm->caching_state = c_state;
+		return 0;
 	}
 
 	if (ttm->caching_state == tt_cached)
@@ -282,13 +274,17 @@
 static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 {
 	int i;
+	unsigned count = 0;
+	struct list_head h;
 	struct page *cur_page;
 	struct ttm_backend *be = ttm->be;
 
+	INIT_LIST_HEAD(&h);
+
 	if (be)
 		be->func->clear(be);
-	(void)ttm_tt_set_caching(ttm, tt_cached);
 	for (i = 0; i < ttm->num_pages; ++i) {
+
 		cur_page = ttm->pages[i];
 		ttm->pages[i] = NULL;
 		if (cur_page) {
@@ -298,9 +294,11 @@
 				       "Leaking pages.\n");
 			ttm_mem_global_free_page(ttm->glob->mem_glob,
 						 cur_page);
-			__free_page(cur_page);
+			list_add(&cur_page->lru, &h);
+			count++;
 		}
 	}
+	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
 	ttm->state = tt_unpopulated;
 	ttm->first_himem_page = ttm->num_pages;
 	ttm->last_lomem_page = -1;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 825ebe3..c4f5114 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -137,9 +137,6 @@
 int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		      struct ttm_mem_type_manager *man)
 {
-	struct vmw_private *dev_priv =
-	    container_of(bdev, struct vmw_private, bdev);
-
 	switch (type) {
 	case TTM_PL_SYSTEM:
 		/* System memory */
@@ -151,11 +148,7 @@
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
 		man->gpu_offset = 0;
-		man->io_offset = dev_priv->vram_start;
-		man->io_size = dev_priv->vram_size;
-		man->flags = TTM_MEMTYPE_FLAG_FIXED |
-		    TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->io_addr = NULL;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_WC;
 		break;
@@ -193,6 +186,42 @@
 	vmw_dmabuf_gmr_unbind(bo);
 }
 
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.is_iomem = false;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.base = dev_priv->vram_start;
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	return 0;
+}
+
 /**
  * FIXME: We're using the old vmware polling method to sync.
  * Do this with fences instead.
@@ -248,5 +277,8 @@
 	.sync_obj_unref = vmw_sync_obj_unref,
 	.sync_obj_ref = vmw_sync_obj_ref,
 	.move_notify = vmw_move_notify,
-	.swap_notify = vmw_swap_notify
+	.swap_notify = vmw_swap_notify,
+	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
+	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
+	.io_mem_free = &vmw_ttm_io_mem_free,
 };
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 0897359..dbd36b8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -570,7 +570,7 @@
 	 * Put BO in VRAM, only if there is space.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
 	if (unlikely(ret == -ERESTARTSYS))
 		return ret;
 
@@ -590,7 +590,7 @@
 	 * previous contents.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index a933670..7421aaa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,8 +559,13 @@
 	info->pixmap.scan_align = 1;
 #endif
 
-	info->aperture_base = vmw_priv->vram_start;
-	info->aperture_size = vmw_priv->vram_size;
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto err_aper;
+	}
+	info->apertures->ranges[0].base = vmw_priv->vram_start;
+	info->apertures->ranges[0].size = vmw_priv->vram_size;
 
 	/*
 	 * Dirty & Deferred IO
@@ -580,6 +585,7 @@
 
 err_defio:
 	fb_deferred_io_cleanup(info);
+err_aper:
 	ttm_bo_kunmap(&par->map);
 err_unref:
 	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
@@ -628,7 +634,7 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
 	ttm_bo_unreserve(bo);
 
 	return ret;
@@ -652,7 +658,7 @@
 	if (unlikely(ret != 0))
 		goto err_unlock;
 
-	ret = ttm_bo_validate(bo, &ne_placement, false, false);
+	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
 	ttm_bo_unreserve(bo);
 err_unlock:
 	ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 31f9afe..bbc7c4c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -752,14 +752,8 @@
 	return NULL;
 }
 
-static int vmw_kms_fb_changed(struct drm_device *dev)
-{
-	return 0;
-}
-
 static struct drm_mode_config_funcs vmw_kms_funcs = {
 	.fb_create = vmw_kms_fb_create,
-	.fb_changed = vmw_kms_fb_changed,
 };
 
 int vmw_kms_init(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 5b6eabe..ad566c8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -118,7 +118,7 @@
 	if (pin)
 		overlay_placement = &vmw_vram_ne_placement;
 
-	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
 
 	ttm_bo_unreserve(bo);
 
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 61ab4da..8d0e31a 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -18,12 +18,12 @@
 	  multiple GPUS.  The overhead for each GPU is very small.
 
 config VGA_SWITCHEROO
-	bool "Laptop Hybrid Grapics - GPU switching support"
+	bool "Laptop Hybrid Graphics - GPU switching support"
 	depends on X86
 	depends on ACPI
 	help
-	  Many laptops released in 2008/9/10 have two gpus with a multiplxer
+	  Many laptops released in 2008/9/10 have two GPUs with a multiplexer
 	  to switch between them. This adds support for dynamic switching when
           X isn't running and delayed switching until the next logoff. This
-	  features is called hybrid graphics, ATI PowerXpress, and Nvidia
+	  feature is called hybrid graphics, ATI PowerXpress, and Nvidia
 	  HybridPower.
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 71d4c07..76ba59b 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -86,6 +86,12 @@
 	---help---
 	Support for Belkin Flip KVM and Wireless keyboard.
 
+config HID_CANDO
+	tristate "Cando dual touch panel"
+	depends on USB_HID
+	---help---
+	Support for Cando dual touch panel.
+
 config HID_CHERRY
 	tristate "Cherry" if EMBEDDED
 	depends on USB_HID
@@ -100,6 +106,21 @@
 	---help---
 	Support for Chicony Tactical pad.
 
+config HID_PRODIKEYS
+	tristate "Prodikeys PC-MIDI Keyboard support"
+	depends on USB_HID && SND
+	select SND_RAWMIDI
+	---help---
+	Support for the Prodikeys PC-MIDI Keyboard device.
+	Say Y here to enable support for this device.
+	- Prodikeys PC-MIDI keyboard.
+	  The Prodikeys PC-MIDI acts as a USB Audio device, with one MIDI
+	  input and one MIDI output. These MIDI jacks appear as
+	  a sound "card" in the ALSA sound system.
+	  Note: if you say N here, this device will still function as a basic
+	  multimedia keyboard, but will lack support for the musical keyboard
+	  and some additional multimedia keys.
+
 config HID_CYPRESS
 	tristate "Cypress" if EMBEDDED
 	depends on USB_HID
@@ -108,9 +129,8 @@
 	Support for cypress mouse and barcode readers.
 
 config HID_DRAGONRISE
-	tristate "DragonRise Inc. support" if EMBEDDED
+	tristate "DragonRise Inc. support"
 	depends on USB_HID
-	default !EMBEDDED
 	---help---
 	Say Y here if you have DragonRise Inc.game controllers.
 
@@ -122,6 +142,12 @@
 	Say Y here if you want to enable force feedback support for DragonRise Inc.
 	game controllers.
 
+config HID_EGALAX
+	tristate "eGalax multi-touch panel"
+	depends on USB_HID
+	---help---
+	Support for the eGalax dual-touch panel.
+
 config HID_EZKEY
 	tristate "Ezkey" if EMBEDDED
 	depends on USB_HID
@@ -137,16 +163,14 @@
 	Support for Kye/Genius Ergo Mouse.
 
 config HID_GYRATION
-	tristate "Gyration" if EMBEDDED
+	tristate "Gyration"
 	depends on USB_HID
-	default !EMBEDDED
 	---help---
 	Support for Gyration remote control.
 
 config HID_TWINHAN
-	tristate "Twinhan" if EMBEDDED
+	tristate "Twinhan"
 	depends on USB_HID
-	default !EMBEDDED
 	---help---
 	Support for Twinhan IR remote control.
 
@@ -233,16 +257,14 @@
 	Support for N-Trig touch screen.
 
 config HID_ORTEK
-	tristate "Ortek" if EMBEDDED
+	tristate "Ortek"
 	depends on USB_HID
-	default !EMBEDDED
 	---help---
 	Support for Ortek WKB-2000 wireless keyboard + mouse trackpad.
 
 config HID_PANTHERLORD
-	tristate "Pantherlord support" if EMBEDDED
+	tristate "Pantherlord support"
 	depends on USB_HID
-	default !EMBEDDED
 	---help---
 	  Say Y here if you have a PantherLord/GreenAsia based game controller
 	  or adapter.
@@ -256,29 +278,90 @@
 	  or adapter and want to enable force feedback support for it.
 
 config HID_PETALYNX
-	tristate "Petalynx" if EMBEDDED
+	tristate "Petalynx"
 	depends on USB_HID
-	default !EMBEDDED
 	---help---
 	Support for Petalynx Maxter remote control.
 
+config HID_PICOLCD
+	tristate "PicoLCD (graphic version)"
+	depends on USB_HID
+	---help---
+	  This provides support for Minibox PicoLCD devices, currently
+	  only the graphical ones are supported.
+
+	  This includes support for the following device features:
+	  - Keypad
+	  - Switching between Firmware and Flash mode
+	  - EEProm / Flash access     (via debugfs)
+	  Features selectively enabled:
+	  - Framebuffer for monochrome 256x64 display
+	  - Backlight control
+	  - Contrast control
+	  - General purpose outputs
+	  Features that are not (yet) supported:
+	  - IR
+
+config HID_PICOLCD_FB
+	bool "Framebuffer support" if EMBEDDED
+	default !EMBEDDED
+	depends on HID_PICOLCD
+	depends on HID_PICOLCD=FB || FB=y
+	select FB_DEFERRED_IO
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_SYS_FOPS
+	---help---
+	  Provide access to PicoLCD's 256x64 monochrome display via a
+	  framebuffer device.
+
+config HID_PICOLCD_BACKLIGHT
+	bool "Backlight control" if EMBEDDED
+	default !EMBEDDED
+	depends on HID_PICOLCD
+	depends on HID_PICOLCD=BACKLIGHT_CLASS_DEVICE || BACKLIGHT_CLASS_DEVICE=y
+	---help---
+	  Provide access to PicoLCD's backlight control via backlight
+	  class.
+
+config HID_PICOLCD_LCD
+	bool "Contrast control" if EMBEDDED
+	default !EMBEDDED
+	depends on HID_PICOLCD
+	depends on HID_PICOLCD=LCD_CLASS_DEVICE || LCD_CLASS_DEVICE=y
+	---help---
+	  Provide access to PicoLCD's LCD contrast via lcd class.
+
+config HID_PICOLCD_LEDS
+	bool "GPO via leds class" if EMBEDDED
+	default !EMBEDDED
+	depends on HID_PICOLCD
+	depends on HID_PICOLCD=LEDS_CLASS || LEDS_CLASS=y
+	---help---
+	  Provide access to PicoLCD's GPO pins via leds class.
+
 config HID_QUANTA
 	tristate "Quanta Optical Touch"
 	depends on USB_HID
 	---help---
 	Support for Quanta Optical Touch dual-touch panels.
 
-config HID_SAMSUNG
-	tristate "Samsung" if EMBEDDED
+config HID_ROCCAT_KONE
+	tristate "Roccat Kone Mouse support"
 	depends on USB_HID
-	default !EMBEDDED
 	---help---
-	Support for Samsung InfraRed remote control.
+	Support for Roccat Kone mouse.
+
+config HID_SAMSUNG
+	tristate "Samsung"
+	depends on USB_HID
+	---help---
+	Support for Samsung InfraRed remote control or keyboards.
 
 config HID_SONY
-	tristate "Sony" if EMBEDDED
+	tristate "Sony"
 	depends on USB_HID
-	default !EMBEDDED
 	---help---
 	Support for Sony PS3 controller.
 
@@ -289,16 +372,14 @@
 	Support for Stantum multitouch panel.
 
 config HID_SUNPLUS
-	tristate "Sunplus" if EMBEDDED
+	tristate "Sunplus"
 	depends on USB_HID
-	default !EMBEDDED
 	---help---
 	Support for Sunplus wireless desktop.
 
 config HID_GREENASIA
-	tristate "GreenAsia (Product ID 0x12) support" if EMBEDDED
+	tristate "GreenAsia (Product ID 0x12) support"
 	depends on USB_HID
-	default !EMBEDDED
 	---help---
 	  Say Y here if you have a GreenAsia (Product ID 0x12) based game
 	  controller or adapter.
@@ -313,9 +394,8 @@
 	and want to enable force feedback support for it.
 
 config HID_SMARTJOYPLUS
-	tristate "SmartJoy PLUS PS2/USB adapter support" if EMBEDDED
+	tristate "SmartJoy PLUS PS2/USB adapter support"
 	depends on USB_HID
-	default !EMBEDDED
 	---help---
 	Support for SmartJoy PLUS PS2/USB adapter.
 
@@ -328,16 +408,14 @@
 	enable force feedback support for it.
 
 config HID_TOPSEED
-	tristate "TopSeed Cyberlink remote control support" if EMBEDDED
+	tristate "TopSeed Cyberlink remote control support"
 	depends on USB_HID
-	default !EMBEDDED
 	---help---
-	Say Y if you have a TopSeed Cyberlink remote control.
+	Say Y if you have a TopSeed Cyberlink or BTC Emprex remote control.
 
 config HID_THRUSTMASTER
-	tristate "ThrustMaster devices support" if EMBEDDED
+	tristate "ThrustMaster devices support"
 	depends on USB_HID
-	default !EMBEDDED
 	---help---
 	  Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or
 	  a THRUSTMASTER Ferrari GT Rumble Wheel.
@@ -357,10 +435,17 @@
 	---help---
 	Support for Wacom Graphire Bluetooth tablet.
 
+config HID_WACOM_POWER_SUPPLY
+	bool "Wacom Bluetooth devices power supply status support"
+	depends on HID_WACOM
+	select POWER_SUPPLY
+	---help---
+	  Say Y here if you want to enable power supply status monitoring for
+	  Wacom Bluetooth devices.
+
 config HID_ZEROPLUS
-	tristate "Zeroplus based game controller support" if EMBEDDED
+	tristate "Zeroplus based game controller support"
 	depends on USB_HID
-	default !EMBEDDED
 	---help---
 	  Say Y here if you have a Zeroplus based game controller.
 
@@ -372,6 +457,12 @@
 	  Say Y here if you have a Zeroplus based game controller and want
 	  to have force feedback support for it.
 
+config HID_ZYDACRON
+	tristate "Zydacron remote control support"
+	depends on USB_HID
+	---help---
+	Support for Zydacron remote control.
+
 endmenu
 
 endif # HID_SUPPORT
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 0b2618f..22e47eae 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -26,10 +26,12 @@
 obj-$(CONFIG_HID_A4TECH)	+= hid-a4tech.o
 obj-$(CONFIG_HID_APPLE)		+= hid-apple.o
 obj-$(CONFIG_HID_BELKIN)	+= hid-belkin.o
+obj-$(CONFIG_HID_CANDO)		+= hid-cando.o
 obj-$(CONFIG_HID_CHERRY)	+= hid-cherry.o
 obj-$(CONFIG_HID_CHICONY)	+= hid-chicony.o
 obj-$(CONFIG_HID_CYPRESS)	+= hid-cypress.o
 obj-$(CONFIG_HID_DRAGONRISE)	+= hid-drff.o
+obj-$(CONFIG_HID_EGALAX)	+= hid-egalax.o
 obj-$(CONFIG_HID_EZKEY)		+= hid-ezkey.o
 obj-$(CONFIG_HID_GYRATION)	+= hid-gyration.o
 obj-$(CONFIG_HID_KENSINGTON)	+= hid-kensington.o
@@ -41,9 +43,12 @@
 obj-$(CONFIG_HID_MOSART)	+= hid-mosart.o
 obj-$(CONFIG_HID_NTRIG)		+= hid-ntrig.o
 obj-$(CONFIG_HID_ORTEK)		+= hid-ortek.o
+obj-$(CONFIG_HID_PRODIKEYS)	+= hid-prodikeys.o
 obj-$(CONFIG_HID_QUANTA)	+= hid-quanta.o
 obj-$(CONFIG_HID_PANTHERLORD)	+= hid-pl.o
 obj-$(CONFIG_HID_PETALYNX)	+= hid-petalynx.o
+obj-$(CONFIG_HID_PICOLCD)	+= hid-picolcd.o
+obj-$(CONFIG_HID_ROCCAT_KONE)	+= hid-roccat-kone.o
 obj-$(CONFIG_HID_SAMSUNG)	+= hid-samsung.o
 obj-$(CONFIG_HID_SMARTJOYPLUS)	+= hid-sjoy.o
 obj-$(CONFIG_HID_SONY)		+= hid-sony.o
@@ -54,6 +59,7 @@
 obj-$(CONFIG_HID_TOPSEED)	+= hid-topseed.o
 obj-$(CONFIG_HID_TWINHAN)	+= hid-twinhan.o
 obj-$(CONFIG_HID_ZEROPLUS)	+= hid-zpff.o
+obj-$(CONFIG_HID_ZYDACRON)	+= hid-zydacron.o
 obj-$(CONFIG_HID_WACOM)		+= hid-wacom.o
 
 obj-$(CONFIG_USB_HID)		+= usbhid/
diff --git a/drivers/hid/hid-3m-pct.c b/drivers/hid/hid-3m-pct.c
index c31e0be..2a0d56b 100644
--- a/drivers/hid/hid-3m-pct.c
+++ b/drivers/hid/hid-3m-pct.c
@@ -1,7 +1,7 @@
 /*
  *  HID driver for 3M PCT multitouch panels
  *
- *  Copyright (c) 2009 Stephane Chatty <chatty@enac.fr>
+ *  Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr>
  *
  */
 
@@ -25,7 +25,7 @@
 #include "hid-ids.h"
 
 struct mmm_finger {
-	__s32 x, y;
+	__s32 x, y, w, h;
 	__u8 rank;
 	bool touch, valid;
 };
@@ -82,7 +82,18 @@
 			/* touchscreen emulation */
 			hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
 			return 1;
+		case HID_DG_WIDTH:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TOUCH_MAJOR);
+			return 1;
+		case HID_DG_HEIGHT:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TOUCH_MINOR);
+			input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
+					1, 1, 0, 0);
+			return 1;
 		case HID_DG_CONTACTID:
+			field->logical_maximum = 59;
 			hid_map_usage(hi, usage, bit, max,
 					EV_ABS, ABS_MT_TRACKING_ID);
 			return 1;
@@ -128,9 +139,15 @@
 			/* this finger is just placeholder data, ignore */
 		} else if (f->touch) {
 			/* this finger is on the screen */
+			int wide = (f->w > f->h);
 			input_event(input, EV_ABS, ABS_MT_TRACKING_ID, i);
 			input_event(input, EV_ABS, ABS_MT_POSITION_X, f->x);
 			input_event(input, EV_ABS, ABS_MT_POSITION_Y, f->y);
+			input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
+			input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR,
+						wide ? f->w : f->h);
+			input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR,
+						wide ? f->h : f->w);
 			input_mt_sync(input);
 			/*
 			 * touchscreen emulation: maintain the age rank
@@ -197,6 +214,14 @@
 		case HID_DG_CONFIDENCE:
 			md->valid = value;
 			break;
+		case HID_DG_WIDTH:
+			if (md->valid)
+				md->f[md->curid].w = value;
+			break;
+		case HID_DG_HEIGHT:
+			if (md->valid)
+				md->f[md->curid].h = value;
+			break;
 		case HID_DG_CONTACTID:
 			if (md->valid) {
 				md->curid = value;
@@ -255,6 +280,7 @@
 
 static const struct hid_device_id mmm_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, mmm_devices);
@@ -287,5 +313,4 @@
 
 module_init(mmm_init);
 module_exit(mmm_exit);
-MODULE_LICENSE("GPL");
 
diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c
new file mode 100644
index 0000000..4267a6f
--- /dev/null
+++ b/drivers/hid/hid-cando.c
@@ -0,0 +1,272 @@
+/*
+ *  HID driver for Cando dual-touch panels
+ *
+ *  Copyright (c) 2010 Stephane Chatty <chatty@enac.fr>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("Cando dual-touch panel");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct cando_data {
+	__u16 x, y;
+	__u8 id;
+	__s8 oldest;		/* id of the oldest finger in previous frame */
+	bool valid;		/* valid finger data, or just placeholder? */
+	bool first;		/* is this the first finger in this frame? */
+	__s8 firstid;		/* id of the first finger in the frame */
+	__u16 firstx, firsty;	/* (x, y) of the first finger in the frame */
+};
+
+static int cando_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	switch (usage->hid & HID_USAGE_PAGE) {
+
+	case HID_UP_GENDESK:
+		switch (usage->hid) {
+		case HID_GD_X:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_X);
+			/* touchscreen emulation */
+			input_set_abs_params(hi->input, ABS_X,
+						field->logical_minimum,
+						field->logical_maximum, 0, 0);
+			return 1;
+		case HID_GD_Y:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_Y);
+			/* touchscreen emulation */
+			input_set_abs_params(hi->input, ABS_Y,
+						field->logical_minimum,
+						field->logical_maximum, 0, 0);
+			return 1;
+		}
+		return 0;
+
+	case HID_UP_DIGITIZER:
+		switch (usage->hid) {
+		case HID_DG_TIPSWITCH:
+		case HID_DG_CONTACTMAX:
+			return -1;
+		case HID_DG_INRANGE:
+			/* touchscreen emulation */
+			hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+			return 1;
+		case HID_DG_CONTACTID:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TRACKING_ID);
+			return 1;
+		}
+		return 0;
+	}
+
+	return 0;
+}
+
+static int cando_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	if (usage->type == EV_KEY || usage->type == EV_ABS)
+		clear_bit(usage->code, *bit);
+
+	return 0;
+}
+
+/*
+ * this function is called when a whole finger has been parsed,
+ * so that it can decide what to send to the input layer.
+ */
+static void cando_filter_event(struct cando_data *td, struct input_dev *input)
+{
+	td->first = !td->first; /* touchscreen emulation */
+
+	if (!td->valid) {
+		/*
+		 * touchscreen emulation: if this is the second finger and
+		 * the first was valid, the first was the oldest; if the
+		 * first was not valid and there was a valid finger in the
+		 * previous frame, this is a release.
+		 */
+		if (td->first) {
+			td->firstid = -1;
+		} else if (td->firstid >= 0) {
+			input_event(input, EV_ABS, ABS_X, td->firstx);
+			input_event(input, EV_ABS, ABS_Y, td->firsty);
+			td->oldest = td->firstid;
+		} else if (td->oldest >= 0) {
+			input_event(input, EV_KEY, BTN_TOUCH, 0);
+			td->oldest = -1;
+		}
+
+		return;
+	}
+
+	input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
+	input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
+	input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
+
+	input_mt_sync(input);
+
+	/*
+	 * touchscreen emulation: if there was no touching finger previously,
+	 * emit touch event
+	 */
+	if (td->oldest < 0) {
+		input_event(input, EV_KEY, BTN_TOUCH, 1);
+		td->oldest = td->id;
+	}
+
+	/*
+	 * touchscreen emulation: if this is the first finger, wait for the
+	 * second; the oldest is then the second if it was the oldest already
+	 * or if there was no first, the first otherwise.
+	 */
+	if (td->first) {
+		td->firstx = td->x;
+		td->firsty = td->y;
+		td->firstid = td->id;
+	} else {
+		int x, y, oldest;
+		if (td->id == td->oldest || td->firstid < 0) {
+			x = td->x;
+			y = td->y;
+			oldest = td->id;
+		} else {
+			x = td->firstx;
+			y = td->firsty;
+			oldest = td->firstid;
+		}
+		input_event(input, EV_ABS, ABS_X, x);
+		input_event(input, EV_ABS, ABS_Y, y);
+		td->oldest = oldest;
+	}
+}
+
+
+static int cando_event(struct hid_device *hid, struct hid_field *field,
+				struct hid_usage *usage, __s32 value)
+{
+	struct cando_data *td = hid_get_drvdata(hid);
+
+	if (hid->claimed & HID_CLAIMED_INPUT) {
+		struct input_dev *input = field->hidinput->input;
+
+		switch (usage->hid) {
+		case HID_DG_INRANGE:
+			td->valid = value;
+			break;
+		case HID_DG_CONTACTID:
+			td->id = value;
+			break;
+		case HID_GD_X:
+			td->x = value;
+			break;
+		case HID_GD_Y:
+			td->y = value;
+			cando_filter_event(td, input);
+			break;
+		case HID_DG_TIPSWITCH:
+			/* avoid interference from generic hidinput handling */
+			break;
+
+		default:
+			/* fallback to the generic hidinput handling */
+			return 0;
+		}
+	}
+
+	/* we have handled the hidinput part, now remains hiddev */
+	if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+		hid->hiddev_hid_event(hid, field, usage, value);
+
+	return 1;
+}
+
+static int cando_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	int ret;
+	struct cando_data *td;
+
+	td = kmalloc(sizeof(struct cando_data), GFP_KERNEL);
+	if (!td) {
+		dev_err(&hdev->dev, "cannot allocate Cando Touch data\n");
+		return -ENOMEM;
+	}
+	hid_set_drvdata(hdev, td);
+	td->first = false;
+	td->oldest = -1;
+	td->valid = false;
+
+	ret = hid_parse(hdev);
+	if (!ret)
+		ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+	if (ret)
+		kfree(td);
+
+	return ret;
+}
+
+static void cando_remove(struct hid_device *hdev)
+{
+	hid_hw_stop(hdev);
+	kfree(hid_get_drvdata(hdev));
+	hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id cando_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+			USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+			USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, cando_devices);
+
+static const struct hid_usage_id cando_grabbed_usages[] = {
+	{ HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+	{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver cando_driver = {
+	.name = "cando-touch",
+	.id_table = cando_devices,
+	.probe = cando_probe,
+	.remove = cando_remove,
+	.input_mapping = cando_input_mapping,
+	.input_mapped = cando_input_mapped,
+	.usage_table = cando_grabbed_usages,
+	.event = cando_event,
+};
+
+static int __init cando_init(void)
+{
+	return hid_register_driver(&cando_driver);
+}
+
+static void __exit cando_exit(void)
+{
+	hid_unregister_driver(&cando_driver);
+}
+
+module_init(cando_init);
+module_exit(cando_exit);
+
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 143e788..e10e314 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -653,10 +653,9 @@
 	if (device->driver->report_fixup)
 		device->driver->report_fixup(device, start, size);
 
-	device->rdesc = kmalloc(size, GFP_KERNEL);
+	device->rdesc = kmemdup(start, size, GFP_KERNEL);
 	if (device->rdesc == NULL)
 		return -ENOMEM;
-	memcpy(device->rdesc, start, size);
 	device->rsize = size;
 
 	parser = vmalloc(sizeof(struct hid_parser));
@@ -940,13 +939,8 @@
 	unsigned count = field->report_count;
 	unsigned offset = field->report_offset;
 	unsigned size = field->report_size;
-	unsigned bitsused = offset + count * size;
 	unsigned n;
 
-	/* make sure the unused bits in the last byte are zeros */
-	if (count > 0 && size > 0 && (bitsused % 8) != 0)
-		data[(bitsused-1)/8] &= (1 << (bitsused % 8)) - 1;
-
 	for (n = 0; n < count; n++) {
 		if (field->logical_minimum < 0)	/* signed values */
 			implement(data, offset + n * size, size, s32ton(field->value[n], size));
@@ -966,6 +960,7 @@
 	if (report->id > 0)
 		*data++ = report->id;
 
+	memset(data, 0, ((report->size - 1) >> 3) + 1);
 	for (n = 0; n < report->maxfield; n++)
 		hid_output_field(report->field[n], data);
 }
@@ -1086,35 +1081,28 @@
 
 	buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
 
-	if (!buf) {
-		report = hid_get_report(report_enum, data);
+	if (!buf)
 		goto nomem;
-	}
-
-	snprintf(buf, HID_DEBUG_BUFSIZE - 1,
-			"\nreport (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
-	hid_debug_event(hid, buf);
-
-	report = hid_get_report(report_enum, data);
-	if (!report) {
-		kfree(buf);
-		return -1;
-	}
 
 	/* dump the report */
 	snprintf(buf, HID_DEBUG_BUFSIZE - 1,
-			"report %d (size %u) = ", report->id, size);
+			"\nreport (size %u) (%snumbered) = ", size, report_enum->numbered ? "" : "un");
 	hid_debug_event(hid, buf);
+
 	for (i = 0; i < size; i++) {
 		snprintf(buf, HID_DEBUG_BUFSIZE - 1,
 				" %02x", data[i]);
 		hid_debug_event(hid, buf);
 	}
 	hid_debug_event(hid, "\n");
-
 	kfree(buf);
 
 nomem:
+	report = hid_get_report(report_enum, data);
+
+	if (!report)
+		return -1;
+
 	if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
 		ret = hdrv->raw_event(hid, report, data, size);
 		if (ret != 0)
@@ -1167,6 +1155,8 @@
 	unsigned int i;
 	int len;
 
+	if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
+		connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
 	if (hdev->bus != BUS_USB)
 		connect_mask &= ~HID_CONNECT_HIDDEV;
 	if (hid_hiddev(hdev))
@@ -1246,6 +1236,7 @@
 /* a list of devices for which there is a specialized driver on HID bus */
 static const struct hid_device_id hid_blacklist[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) },
@@ -1290,14 +1281,19 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
@@ -1331,6 +1327,8 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
@@ -1342,7 +1340,9 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
@@ -1359,8 +1359,10 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
 
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
 	{ }
@@ -1757,7 +1759,7 @@
 
 	/* we need to kill them here, otherwise they will stay allocated to
 	 * wait for coming driver */
-	if (hid_ignore(hdev))
+	if (!(hdev->quirks & HID_QUIRK_NO_IGNORE) && hid_ignore(hdev))
 		return -ENODEV;
 
 	/* XXX hack, any other cleaner solution after the driver core
@@ -1765,11 +1767,12 @@
 	dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
 		     hdev->vendor, hdev->product, atomic_inc_return(&id));
 
+	hid_debug_register(hdev, dev_name(&hdev->dev));
 	ret = device_add(&hdev->dev);
 	if (!ret)
 		hdev->status |= HID_STAT_ADDED;
-
-	hid_debug_register(hdev, dev_name(&hdev->dev));
+	else
+		hid_debug_unregister(hdev);
 
 	return ret;
 }
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
new file mode 100644
index 0000000..f44bdc0
--- /dev/null
+++ b/drivers/hid/hid-egalax.c
@@ -0,0 +1,281 @@
+/*
+ *  HID driver for eGalax dual-touch panels
+ *
+ *  Copyright (c) 2010 Stephane Chatty <chatty@enac.fr>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include "usbhid/usbhid.h"
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("eGalax dual-touch panel");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct egalax_data {
+	__u16 x, y, z;
+	__u8 id;
+	bool first;		/* is this the first finger in the frame? */
+	bool valid;		/* valid finger data, or just placeholder? */
+	bool activity;		/* at least one active finger previously? */
+	__u16 lastx, lasty;	/* latest valid (x, y) in the frame */
+};
+
+static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	switch (usage->hid & HID_USAGE_PAGE) {
+
+	case HID_UP_GENDESK:
+		switch (usage->hid) {
+		case HID_GD_X:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_X);
+			/* touchscreen emulation */
+			input_set_abs_params(hi->input, ABS_X,
+						field->logical_minimum,
+						field->logical_maximum, 0, 0);
+			return 1;
+		case HID_GD_Y:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_Y);
+			/* touchscreen emulation */
+			input_set_abs_params(hi->input, ABS_Y,
+						field->logical_minimum,
+						field->logical_maximum, 0, 0);
+			return 1;
+		}
+		return 0;
+
+	case HID_UP_DIGITIZER:
+		switch (usage->hid) {
+		case HID_DG_TIPSWITCH:
+			/* touchscreen emulation */
+			hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+			return 1;
+		case HID_DG_INRANGE:
+		case HID_DG_CONFIDENCE:
+		case HID_DG_CONTACTCOUNT:
+		case HID_DG_CONTACTMAX:
+			return -1;
+		case HID_DG_CONTACTID:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TRACKING_ID);
+			return 1;
+		case HID_DG_TIPPRESSURE:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_PRESSURE);
+			return 1;
+		}
+		return 0;
+	}
+
+	/* ignore others (from other reports we won't get anyway) */
+	return -1;
+}
+
+static int egalax_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	if (usage->type == EV_KEY || usage->type == EV_ABS)
+		clear_bit(usage->code, *bit);
+
+	return 0;
+}
+
+/*
+ * this function is called when a whole finger has been parsed,
+ * so that it can decide what to send to the input layer.
+ */
+static void egalax_filter_event(struct egalax_data *td, struct input_dev *input)
+{
+	td->first = !td->first; /* touchscreen emulation */
+
+	if (td->valid) {
+		/* emit multitouch events */
+		input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
+		input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
+		input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
+		input_event(input, EV_ABS, ABS_MT_PRESSURE, td->z);
+
+		input_mt_sync(input);
+
+		/*
+		 * touchscreen emulation: store (x, y) as
+		 * the last valid values in this frame
+		 */
+		td->lastx = td->x;
+		td->lasty = td->y;
+	}
+
+	/*
+	 * touchscreen emulation: if this is the second finger and at least
+	 * one in this frame is valid, the latest valid in the frame is
+	 * the oldest on the panel, the one we want for single touch
+	 */
+	if (!td->first && td->activity) {
+		input_event(input, EV_ABS, ABS_X, td->lastx);
+		input_event(input, EV_ABS, ABS_Y, td->lasty);
+	}
+
+	if (!td->valid) {
+		/*
+		 * touchscreen emulation: if the first finger is invalid
+		 * and there previously was finger activity, this is a release
+		 */ 
+		if (td->first && td->activity) {
+			input_event(input, EV_KEY, BTN_TOUCH, 0);
+			td->activity = false;
+		}
+		return;
+	}
+
+
+	/* touchscreen emulation: if no previous activity, emit touch event */
+	if (!td->activity) {
+		input_event(input, EV_KEY, BTN_TOUCH, 1);
+		td->activity = true;
+	}
+}
+
+
+static int egalax_event(struct hid_device *hid, struct hid_field *field,
+				struct hid_usage *usage, __s32 value)
+{
+	struct egalax_data *td = hid_get_drvdata(hid);
+
+	if (hid->claimed & HID_CLAIMED_INPUT) {
+		struct input_dev *input = field->hidinput->input;
+
+		switch (usage->hid) {
+		case HID_DG_INRANGE:
+		case HID_DG_CONFIDENCE:
+			/* avoid interference from generic hidinput handling */
+			break;
+		case HID_DG_TIPSWITCH:
+			td->valid = value;
+			break;
+		case HID_DG_TIPPRESSURE:
+			td->z = value;
+			break;
+		case HID_DG_CONTACTID:
+			td->id = value;
+			break;
+		case HID_GD_X:
+			td->x = value;
+			break;
+		case HID_GD_Y:
+			td->y = value;
+			/* this is the last field in a finger */
+			egalax_filter_event(td, input);
+			break;
+		case HID_DG_CONTACTCOUNT:
+			/* touch emulation: this is the last field in a frame */
+			td->first = false;
+			break;
+
+		default:
+			/* fallback to the generic hidinput handling */
+			return 0;
+		}
+	}
+
+	/* we have handled the hidinput part, now remains hiddev */
+	if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+		hid->hiddev_hid_event(hid, field, usage, value);
+
+	return 1;
+}
+
+static int egalax_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	int ret;
+	struct egalax_data *td;
+	struct hid_report *report;
+
+	td = kmalloc(sizeof(struct egalax_data), GFP_KERNEL);
+	if (!td) {
+		dev_err(&hdev->dev, "cannot allocate eGalax data\n");
+		return -ENOMEM;
+	}
+	hid_set_drvdata(hdev, td);
+
+	ret = hid_parse(hdev);
+	if (ret)
+		goto end;
+
+	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+	if (ret)
+		goto end;
+
+	report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[5]; 
+	if (report) {
+		report->field[0]->value[0] = 2;
+		usbhid_submit_report(hdev, report, USB_DIR_OUT);
+	}
+
+end:
+	if (ret)
+		kfree(td);
+
+	return ret;
+}
+
+static void egalax_remove(struct hid_device *hdev)
+{
+	hid_hw_stop(hdev);
+	kfree(hid_get_drvdata(hdev));
+	hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id egalax_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, egalax_devices);
+
+static const struct hid_usage_id egalax_grabbed_usages[] = {
+	{ HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+	{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver egalax_driver = {
+	.name = "egalax-touch",
+	.id_table = egalax_devices,
+	.probe = egalax_probe,
+	.remove = egalax_remove,
+	.input_mapping = egalax_input_mapping,
+	.input_mapped = egalax_input_mapped,
+	.usage_table = egalax_grabbed_usages,
+	.event = egalax_event,
+};
+
+static int __init egalax_init(void)
+{
+	return hid_register_driver(&egalax_driver);
+}
+
+static void __exit egalax_exit(void)
+{
+	hid_unregister_driver(&egalax_driver);
+}
+
+module_init(egalax_init);
+module_exit(egalax_exit);
+
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 09d2764..9776896 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -20,6 +20,7 @@
 
 #define USB_VENDOR_ID_3M		0x0596
 #define USB_DEVICE_ID_3M1968		0x0500
+#define USB_DEVICE_ID_3M2256		0x0502
 
 #define USB_VENDOR_ID_A4TECH		0x09da
 #define USB_DEVICE_ID_A4TECH_WCP32PU	0x0006
@@ -123,6 +124,13 @@
 #define USB_VENDOR_ID_BERKSHIRE		0x0c98
 #define USB_DEVICE_ID_BERKSHIRE_PCWD	0x1140
 
+#define USB_VENDOR_ID_BTC		0x046e
+#define USB_DEVICE_ID_BTC_EMPREX_REMOTE	0x5578
+
+#define USB_VENDOR_ID_CANDO		0x2087
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03
+
 #define USB_VENDOR_ID_CH		0x068e
 #define USB_DEVICE_ID_CH_PRO_PEDALS	0x00f2
 #define USB_DEVICE_ID_CH_COMBATSTICK	0x00f4
@@ -148,6 +156,9 @@
 #define USB_DEVICE_ID_CODEMERCS_IOW_FIRST	0x1500
 #define USB_DEVICE_ID_CODEMERCS_IOW_LAST	0x15ff
 
+#define USB_VENDOR_ID_CREATIVELABS	0x041e
+#define USB_DEVICE_ID_PRODIKEYS_PCMIDI	0x2801
+
 #define USB_VENDOR_ID_CYGNAL		0x10c4
 #define USB_DEVICE_ID_CYGNAL_RADIO_SI470X	0x818a
 
@@ -171,6 +182,10 @@
 
 #define USB_VENDOR_ID_DRAGONRISE	0x0079
 
+#define USB_VENDOR_ID_DWAV		0x0eef
+#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER	0x0001
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH	0x480d
+
 #define USB_VENDOR_ID_ELO		0x04E7
 #define USB_DEVICE_ID_ELO_TS2700	0x0020
 
@@ -342,6 +357,8 @@
 #define USB_VENDOR_ID_MICROCHIP		0x04d8
 #define USB_DEVICE_ID_PICKIT1		0x0032
 #define USB_DEVICE_ID_PICKIT2		0x0033
+#define USB_DEVICE_ID_PICOLCD		0xc002
+#define USB_DEVICE_ID_PICOLCD_BOOTLOADER	0xf002
 
 #define USB_VENDOR_ID_MICROSOFT		0x045e
 #define USB_DEVICE_ID_SIDEWINDER_GV	0x003b
@@ -400,6 +417,9 @@
 #define USB_VENDOR_ID_PRODIGE		0x05af
 #define USB_DEVICE_ID_PRODIGE_CORDLESS	0x3062
 
+#define USB_VENDOR_ID_ROCCAT		0x1e7d
+#define USB_DEVICE_ID_ROCCAT_KONE	0x2ced
+
 #define USB_VENDOR_ID_SAITEK		0x06a3
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD	0xff17
 
@@ -409,6 +429,7 @@
 
 #define USB_VENDOR_ID_SAMSUNG		0x0419
 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE	0x0001
+#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE	0x0600
 
 #define USB_VENDOR_ID_SONY			0x054c
 #define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE	0x024b
@@ -457,6 +478,7 @@
 
 #define USB_VENDOR_ID_WACOM		0x056a
 #define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH	0x81
+#define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH	0xbd
 
 #define USB_VENDOR_ID_WISEGROUP		0x0925
 #define USB_DEVICE_ID_SMARTJOY_PLUS	0x0005
@@ -475,6 +497,9 @@
 
 #define USB_VENDOR_ID_ZEROPLUS		0x0c12
 
+#define USB_VENDOR_ID_ZYDACRON	0x13EC
+#define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL	0x0006
+
 #define USB_VENDOR_ID_KYE		0x0458
 #define USB_DEVICE_ID_KYE_ERGO_525V	0x0087
 #define USB_DEVICE_ID_KYE_GPEN_560	0x5003
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 3677c90..f6433d8 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -126,6 +126,9 @@
 	case 0x1004: lg_map_key_clear(KEY_VIDEO);		break;
 	case 0x1005: lg_map_key_clear(KEY_AUDIO);		break;
 	case 0x100a: lg_map_key_clear(KEY_DOCUMENTS);		break;
+	/* The following two entries are Playlist 1 and 2 on the MX3200 */
+	case 0x100f: lg_map_key_clear(KEY_FN_1);		break;
+	case 0x1010: lg_map_key_clear(KEY_FN_2);		break;
 	case 0x1011: lg_map_key_clear(KEY_PREVIOUSSONG);	break;
 	case 0x1012: lg_map_key_clear(KEY_NEXTSONG);		break;
 	case 0x1013: lg_map_key_clear(KEY_CAMERA);		break;
@@ -137,6 +140,7 @@
 	case 0x1019: lg_map_key_clear(KEY_PROG1);		break;
 	case 0x101a: lg_map_key_clear(KEY_PROG2);		break;
 	case 0x101b: lg_map_key_clear(KEY_PROG3);		break;
+	case 0x101c: lg_map_key_clear(KEY_CYCLEWINDOWS);	break;
 	case 0x101f: lg_map_key_clear(KEY_ZOOMIN);		break;
 	case 0x1020: lg_map_key_clear(KEY_ZOOMOUT);		break;
 	case 0x1021: lg_map_key_clear(KEY_ZOOMRESET);		break;
@@ -147,6 +151,11 @@
 	case 0x1029: lg_map_key_clear(KEY_SHUFFLE);		break;
 	case 0x102a: lg_map_key_clear(KEY_BACK);		break;
 	case 0x102b: lg_map_key_clear(KEY_CYCLEWINDOWS);	break;
+	case 0x102d: lg_map_key_clear(KEY_WWW);			break;
+	/* The following two are 'Start/answer call' and 'End/reject call'
+	   on the MX3200 */
+	case 0x1031: lg_map_key_clear(KEY_OK);			break;
+	case 0x1032: lg_map_key_clear(KEY_CANCEL);		break;
 	case 0x1041: lg_map_key_clear(KEY_BATTERY);		break;
 	case 0x1042: lg_map_key_clear(KEY_WORDPROCESSOR);	break;
 	case 0x1043: lg_map_key_clear(KEY_SPREADSHEET);		break;
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 0d471fc2..f10d56a 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -354,12 +354,15 @@
 		goto err_free;
 	}
 
-	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDINPUT);
+	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 	if (ret) {
 		dev_err(&hdev->dev, "magicmouse hw start failed\n");
 		goto err_free;
 	}
 
+	/* we are handling the input ourselves */
+	hidinput_disconnect(hdev);
+
 	report = hid_register_report(hdev, HID_INPUT_REPORT, TOUCH_REPORT_ID);
 	if (!report) {
 		dev_err(&hdev->dev, "unable to register touch report\n");
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 4777bbf..b6b0cae 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -24,6 +24,34 @@
 
 #define NTRIG_DUPLICATE_USAGES	0x001
 
+static unsigned int min_width;
+module_param(min_width, uint, 0644);
+MODULE_PARM_DESC(min_width, "Minimum touch contact width to accept.");
+
+static unsigned int min_height;
+module_param(min_height, uint, 0644);
+MODULE_PARM_DESC(min_height, "Minimum touch contact height to accept.");
+
+static unsigned int activate_slack = 1;
+module_param(activate_slack, uint, 0644);
+MODULE_PARM_DESC(activate_slack, "Number of touch frames to ignore at "
+		 "the start of touch input.");
+
+static unsigned int deactivate_slack = 4;
+module_param(deactivate_slack, uint, 0644);
+MODULE_PARM_DESC(deactivate_slack, "Number of empty frames to ignore before "
+		 "deactivating touch.");
+
+static unsigned int activation_width = 64;
+module_param(activation_width, uint, 0644);
+MODULE_PARM_DESC(activation_width, "Width threshold to immediately start "
+		 "processing touch events.");
+
+static unsigned int activation_height = 32;
+module_param(activation_height, uint, 0644);
+MODULE_PARM_DESC(activation_height, "Height threshold to immediately start "
+		 "processing touch events.");
+
 struct ntrig_data {
 	/* Incoming raw values for a single contact */
 	__u16 x, y, w, h;
@@ -37,6 +65,309 @@
 
 	__u8 mt_footer[4];
 	__u8 mt_foot_count;
+
+	/* The current activation state. */
+	__s8 act_state;
+
+	/* Empty frames to ignore before recognizing the end of activity */
+	__s8 deactivate_slack;
+
+	/* Frames to ignore before acknowledging the start of activity */
+	__s8 activate_slack;
+
+	/* Minimum size contact to accept */
+	__u16 min_width;
+	__u16 min_height;
+
+	/* Threshold to override activation slack */
+	__u16 activation_width;
+	__u16 activation_height;
+
+	__u16 sensor_logical_width;
+	__u16 sensor_logical_height;
+	__u16 sensor_physical_width;
+	__u16 sensor_physical_height;
+};
+
+
+static ssize_t show_phys_width(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	return sprintf(buf, "%d\n", nd->sensor_physical_width);
+}
+
+static DEVICE_ATTR(sensor_physical_width, S_IRUGO, show_phys_width, NULL);
+
+static ssize_t show_phys_height(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	return sprintf(buf, "%d\n", nd->sensor_physical_height);
+}
+
+static DEVICE_ATTR(sensor_physical_height, S_IRUGO, show_phys_height, NULL);
+
+static ssize_t show_log_width(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	return sprintf(buf, "%d\n", nd->sensor_logical_width);
+}
+
+static DEVICE_ATTR(sensor_logical_width, S_IRUGO, show_log_width, NULL);
+
+static ssize_t show_log_height(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	return sprintf(buf, "%d\n", nd->sensor_logical_height);
+}
+
+static DEVICE_ATTR(sensor_logical_height, S_IRUGO, show_log_height, NULL);
+
+static ssize_t show_min_width(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	return sprintf(buf, "%d\n", nd->min_width *
+				    nd->sensor_physical_width /
+				    nd->sensor_logical_width);
+}
+
+static ssize_t set_min_width(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	unsigned long val;
+
+	if (strict_strtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val > nd->sensor_physical_width)
+		return -EINVAL;
+
+	nd->min_width = val * nd->sensor_logical_width /
+			      nd->sensor_physical_width;
+
+	return count;
+}
+
+static DEVICE_ATTR(min_width, S_IWUSR | S_IRUGO, show_min_width, set_min_width);
+
+static ssize_t show_min_height(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	return sprintf(buf, "%d\n", nd->min_height *
+				    nd->sensor_physical_height /
+				    nd->sensor_logical_height);
+}
+
+static ssize_t set_min_height(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	unsigned long val;
+
+	if (strict_strtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val > nd->sensor_physical_height)
+		return -EINVAL;
+
+	nd->min_height = val * nd->sensor_logical_height /
+			       nd->sensor_physical_height;
+
+	return count;
+}
+
+static DEVICE_ATTR(min_height, S_IWUSR | S_IRUGO, show_min_height,
+		   set_min_height);
+
+static ssize_t show_activate_slack(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	return sprintf(buf, "%d\n", nd->activate_slack);
+}
+
+static ssize_t set_activate_slack(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	unsigned long val;
+
+	if (strict_strtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val > 0x7f)
+		return -EINVAL;
+
+	nd->activate_slack = val;
+
+	return count;
+}
+
+static DEVICE_ATTR(activate_slack, S_IWUSR | S_IRUGO, show_activate_slack,
+		   set_activate_slack);
+
+static ssize_t show_activation_width(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	return sprintf(buf, "%d\n", nd->activation_width *
+				    nd->sensor_physical_width /
+				    nd->sensor_logical_width);
+}
+
+static ssize_t set_activation_width(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	unsigned long val;
+
+	if (strict_strtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val > nd->sensor_physical_width)
+		return -EINVAL;
+
+	nd->activation_width = val * nd->sensor_logical_width /
+				     nd->sensor_physical_width;
+
+	return count;
+}
+
+static DEVICE_ATTR(activation_width, S_IWUSR | S_IRUGO, show_activation_width,
+		   set_activation_width);
+
+static ssize_t show_activation_height(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	return sprintf(buf, "%d\n", nd->activation_height *
+				    nd->sensor_physical_height /
+				    nd->sensor_logical_height);
+}
+
+static ssize_t set_activation_height(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	unsigned long val;
+
+	if (strict_strtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val > nd->sensor_physical_height)
+		return -EINVAL;
+
+	nd->activation_height = val * nd->sensor_logical_height /
+				      nd->sensor_physical_height;
+
+	return count;
+}
+
+static DEVICE_ATTR(activation_height, S_IWUSR | S_IRUGO,
+		   show_activation_height, set_activation_height);
+
+static ssize_t show_deactivate_slack(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	return sprintf(buf, "%d\n", -nd->deactivate_slack);
+}
+
+static ssize_t set_deactivate_slack(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+	unsigned long val;
+
+	if (strict_strtoul(buf, 0, &val))
+		return -EINVAL;
+
+	/*
+	 * No more than 8 terminal frames have been observed so far
+	 * and higher slack is highly likely to leave the single
+	 * touch emulation stuck down.
+	 */
+	if (val > 7)
+		return -EINVAL;
+
+	nd->deactivate_slack = -val;
+
+	return count;
+}
+
+static DEVICE_ATTR(deactivate_slack, S_IWUSR | S_IRUGO, show_deactivate_slack,
+		   set_deactivate_slack);
+
+static struct attribute *sysfs_attrs[] = {
+	&dev_attr_sensor_physical_width.attr,
+	&dev_attr_sensor_physical_height.attr,
+	&dev_attr_sensor_logical_width.attr,
+	&dev_attr_sensor_logical_height.attr,
+	&dev_attr_min_height.attr,
+	&dev_attr_min_width.attr,
+	&dev_attr_activate_slack.attr,
+	&dev_attr_activation_width.attr,
+	&dev_attr_activation_height.attr,
+	&dev_attr_deactivate_slack.attr,
+	NULL
+};
+
+static struct attribute_group ntrig_attribute_group = {
+	.attrs = sysfs_attrs
 };
 
 /*
@@ -49,6 +380,8 @@
 		struct hid_field *field, struct hid_usage *usage,
 		unsigned long **bit, int *max)
 {
+	struct ntrig_data *nd = hid_get_drvdata(hdev);
+
 	/* No special mappings needed for the pen and single touch */
 	if (field->physical)
 		return 0;
@@ -62,6 +395,21 @@
 			input_set_abs_params(hi->input, ABS_X,
 					field->logical_minimum,
 					field->logical_maximum, 0, 0);
+
+			if (!nd->sensor_logical_width) {
+				nd->sensor_logical_width =
+					field->logical_maximum -
+					field->logical_minimum;
+				nd->sensor_physical_width =
+					field->physical_maximum -
+					field->physical_minimum;
+				nd->activation_width = activation_width *
+					nd->sensor_logical_width /
+					nd->sensor_physical_width;
+				nd->min_width = min_width *
+					nd->sensor_logical_width /
+					nd->sensor_physical_width;
+			}
 			return 1;
 		case HID_GD_Y:
 			hid_map_usage(hi, usage, bit, max,
@@ -69,6 +417,21 @@
 			input_set_abs_params(hi->input, ABS_Y,
 					field->logical_minimum,
 					field->logical_maximum, 0, 0);
+
+			if (!nd->sensor_logical_height) {
+				nd->sensor_logical_height =
+					field->logical_maximum -
+					field->logical_minimum;
+				nd->sensor_physical_height =
+					field->physical_maximum -
+					field->physical_minimum;
+				nd->activation_height = activation_height *
+					nd->sensor_logical_height /
+					nd->sensor_physical_height;
+				nd->min_height = min_height *
+					nd->sensor_logical_height /
+					nd->sensor_physical_height;
+			}
 			return 1;
 		}
 		return 0;
@@ -201,20 +564,68 @@
 			if (nd->mt_foot_count != 4)
 				break;
 
-			/* Pen activity signal, trigger end of touch. */
+			/* Pen activity signal. */
 			if (nd->mt_footer[2]) {
+				/*
+				 * When the pen deactivates touch, we see a
+				 * bogus frame with ContactCount > 0.
+				 * We can
+				 * save a bit of work by ensuring act_state < 0
+				 * even if deactivation slack is turned off.
+				 */
+				nd->act_state = deactivate_slack - 1;
 				nd->confidence = 0;
 				break;
 			}
 
-			/* If the contact was invalid */
-			if (!(nd->confidence && nd->mt_footer[0])
-					|| nd->w <= 250
-					|| nd->h <= 190) {
-				nd->confidence = 0;
+			/*
+			 * The first footer value indicates the presence of a
+			 * finger.
+			 */
+			if (nd->mt_footer[0]) {
+				/*
+				 * We do not want to process contacts under
+				 * the size threshold, but do not want to
+				 * ignore them for activation state
+				 */
+				if (nd->w < nd->min_width ||
+				    nd->h < nd->min_height)
+					nd->confidence = 0;
+			} else
 				break;
+
+			if (nd->act_state > 0) {
+				/*
+				 * Contact meets the activation size threshold
+				 */
+				if (nd->w >= nd->activation_width &&
+				    nd->h >= nd->activation_height) {
+					if (nd->id)
+						/*
+						 * first contact, activate now
+						 */
+						nd->act_state = 0;
+					else {
+						/*
+						 * avoid corrupting this frame
+						 * but ensure next frame will
+						 * be active
+						 */
+						nd->act_state = 1;
+						break;
+					}
+				} else
+					/*
+					 * Defer adjusting the activation state
+					 * until the end of the frame.
+					 */
+					break;
 			}
 
+			/* Discarding this contact */
+			if (!nd->confidence)
+				break;
+
 			/* emit a normal (X, Y) for the first point only */
 			if (nd->id == 0) {
 				/*
@@ -227,8 +638,15 @@
 				input_event(input, EV_ABS, ABS_X, nd->x);
 				input_event(input, EV_ABS, ABS_Y, nd->y);
 			}
+
+			/* Emit MT events */
 			input_event(input, EV_ABS, ABS_MT_POSITION_X, nd->x);
 			input_event(input, EV_ABS, ABS_MT_POSITION_Y, nd->y);
+
+			/*
+			 * Translate from height and width to size
+			 * and orientation.
+			 */
 			if (nd->w > nd->h) {
 				input_event(input, EV_ABS,
 						ABS_MT_ORIENTATION, 1);
@@ -248,12 +666,88 @@
 			break;
 
 		case HID_DG_CONTACTCOUNT: /* End of a multitouch group */
-			if (!nd->reading_mt)
+			if (!nd->reading_mt) /* Just to be sure */
 				break;
 
 			nd->reading_mt = 0;
 
-			if (nd->first_contact_touch) {
+
+			/*
+			 * Activation state machine logic:
+			 *
+			 * Fundamental states:
+			 *	state >  0: Inactive
+			 *	state <= 0: Active
+			 *	state <  -deactivate_slack:
+			 *		 Pen termination of touch
+			 *
+			 * Specific values of interest
+			 *	state == activate_slack
+			 *		 no valid input since the last reset
+			 *
+			 *	state == 0
+			 *		 general operational state
+			 *
+			 *	state == -deactivate_slack
+			 *		 read sufficient empty frames to accept
+			 *		 the end of input and reset
+			 */
+
+			if (nd->act_state > 0) { /* Currently inactive */
+				if (value)
+					/*
+					 * Consider each live contact as
+					 * evidence of intentional activity.
+					 */
+					nd->act_state = (nd->act_state > value)
+							? nd->act_state - value
+							: 0;
+				else
+					/*
+					 * Empty frame before we hit the
+					 * activity threshold, reset.
+					 */
+					nd->act_state = nd->activate_slack;
+
+				/*
+				 * Entered this block inactive and no
+				 * coordinates sent this frame, so hold off
+				 * on button state.
+				 */
+				break;
+			} else { /* Currently active */
+				if (value && nd->act_state >=
+					     nd->deactivate_slack)
+					/*
+					 * Live point: clear accumulated
+					 * deactivation count.
+					 */
+					nd->act_state = 0;
+				else if (nd->act_state <= nd->deactivate_slack)
+					/*
+					 * We've consumed the deactivation
+					 * slack, time to deactivate and reset.
+					 */
+					nd->act_state =
+						nd->activate_slack;
+				else { /* Move towards deactivation */
+					nd->act_state--;
+					break;
+				}
+			}
+
+			if (nd->first_contact_touch && nd->act_state <= 0) {
+				/*
+				 * Check to see if we're ready to start
+				 * emitting touch events.
+				 *
+				 * Note: activation slack will decrease over
+				 * the course of the frame, and it will be
+				 * inconsistent from the start to the end of
+				 * the frame.  However if the frame starts
+				 * with slack, first_contact_touch will still
+				 * be 0 and we will not get to this point.
+				 */
 				input_report_key(input, BTN_TOOL_DOUBLETAP, 1);
 				input_report_key(input, BTN_TOUCH, 1);
 			} else {
@@ -263,7 +757,7 @@
 			break;
 
 		default:
-			/* fallback to the generic hidinput handling */
+			/* fall-back to the generic hidinput handling */
 			return 0;
 		}
 	}
@@ -293,6 +787,16 @@
 	}
 
 	nd->reading_mt = 0;
+	nd->min_width = 0;
+	nd->min_height = 0;
+	nd->activate_slack = activate_slack;
+	nd->act_state = activate_slack;
+	nd->deactivate_slack = -deactivate_slack;
+	nd->sensor_logical_width = 0;
+	nd->sensor_logical_height = 0;
+	nd->sensor_physical_width = 0;
+	nd->sensor_physical_height = 0;
+
 	hid_set_drvdata(hdev, nd);
 
 	ret = hid_parse(hdev);
@@ -344,6 +848,8 @@
 	if (report)
 		usbhid_submit_report(hdev, report, USB_DIR_OUT);
 
+	ret = sysfs_create_group(&hdev->dev.kobj,
+			&ntrig_attribute_group);
 
 	return 0;
 err_free:
@@ -353,6 +859,8 @@
 
 static void ntrig_remove(struct hid_device *hdev)
 {
+	sysfs_remove_group(&hdev->dev.kobj,
+			&ntrig_attribute_group);
 	hid_hw_stop(hdev);
 	kfree(hid_get_drvdata(hdev));
 }
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
new file mode 100644
index 0000000..7aabf65
--- /dev/null
+++ b/drivers/hid/hid-picolcd.c
@@ -0,0 +1,2631 @@
+/***************************************************************************
+ *   Copyright (C) 2010 by Bruno Prémont <bonbons@linux-vserver.org>       *
+ *                                                                         *
+ *   Based on Logitech G13 driver (v0.4)                                   *
+ *     Copyright (C) 2009 by Rick L. Vinyard, Jr. <rvinyard@cs.nmsu.edu>   *
+ *                                                                         *
+ *   This program is free software: you can redistribute it and/or modify  *
+ *   it under the terms of the GNU General Public License as published by  *
+ *   the Free Software Foundation, version 2 of the License.               *
+ *                                                                         *
+ *   This driver is distributed in the hope that it will be useful, but    *
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of            *
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU      *
+ *   General Public License for more details.                              *
+ *                                                                         *
+ *   You should have received a copy of the GNU General Public License     *
+ *   along with this software. If not see <http://www.gnu.org/licenses/>.  *
+ ***************************************************************************/
+
+#include <linux/hid.h>
+#include <linux/hid-debug.h>
+#include <linux/input.h>
+#include "hid-ids.h"
+#include "usbhid/usbhid.h"
+#include <linux/usb.h>
+
+#include <linux/fb.h>
+#include <linux/vmalloc.h>
+#include <linux/backlight.h>
+#include <linux/lcd.h>
+
+#include <linux/leds.h>
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include <linux/completion.h>
+#include <linux/uaccess.h>
+
+#define PICOLCD_NAME "PicoLCD (graphic)"
+
+/* Report numbers */
+#define REPORT_ERROR_CODE      0x10 /* LCD: IN[16]  */
+#define   ERR_SUCCESS            0x00
+#define   ERR_PARAMETER_MISSING  0x01
+#define   ERR_DATA_MISSING       0x02
+#define   ERR_BLOCK_READ_ONLY    0x03
+#define   ERR_BLOCK_NOT_ERASABLE 0x04
+#define   ERR_BLOCK_TOO_BIG      0x05
+#define   ERR_SECTION_OVERFLOW   0x06
+#define   ERR_INVALID_CMD_LEN    0x07
+#define   ERR_INVALID_DATA_LEN   0x08
+#define REPORT_KEY_STATE       0x11 /* LCD: IN[2]   */
+#define REPORT_IR_DATA         0x21 /* LCD: IN[63]  */
+#define REPORT_EE_DATA         0x32 /* LCD: IN[63]  */
+#define REPORT_MEMORY          0x41 /* LCD: IN[63]  */
+#define REPORT_LED_STATE       0x81 /* LCD: OUT[1]  */
+#define REPORT_BRIGHTNESS      0x91 /* LCD: OUT[1]  */
+#define REPORT_CONTRAST        0x92 /* LCD: OUT[1]  */
+#define REPORT_RESET           0x93 /* LCD: OUT[2]  */
+#define REPORT_LCD_CMD         0x94 /* LCD: OUT[63] */
+#define REPORT_LCD_DATA        0x95 /* LCD: OUT[63] */
+#define REPORT_LCD_CMD_DATA    0x96 /* LCD: OUT[63] */
+#define	REPORT_EE_READ         0xa3 /* LCD: OUT[63] */
+#define REPORT_EE_WRITE        0xa4 /* LCD: OUT[63] */
+#define REPORT_ERASE_MEMORY    0xb2 /* LCD: OUT[2]  */
+#define REPORT_READ_MEMORY     0xb3 /* LCD: OUT[3]  */
+#define REPORT_WRITE_MEMORY    0xb4 /* LCD: OUT[63] */
+#define REPORT_SPLASH_RESTART  0xc1 /* LCD: OUT[1]  */
+#define REPORT_EXIT_KEYBOARD   0xef /* LCD: OUT[2]  */
+#define REPORT_VERSION         0xf1 /* LCD: IN[2],OUT[1]    Bootloader: IN[2],OUT[1]   */
+#define REPORT_BL_ERASE_MEMORY 0xf2 /*                      Bootloader: IN[36],OUT[4]  */
+#define REPORT_BL_READ_MEMORY  0xf3 /*                      Bootloader: IN[36],OUT[4]  */
+#define REPORT_BL_WRITE_MEMORY 0xf4 /*                      Bootloader: IN[36],OUT[36] */
+#define REPORT_DEVID           0xf5 /* LCD: IN[5], OUT[1]   Bootloader: IN[5],OUT[1]   */
+#define REPORT_SPLASH_SIZE     0xf6 /* LCD: IN[4], OUT[1]   */
+#define REPORT_HOOK_VERSION    0xf7 /* LCD: IN[2], OUT[1]   */
+#define REPORT_EXIT_FLASHER    0xff /*                      Bootloader: OUT[2]         */
+
+#ifdef CONFIG_HID_PICOLCD_FB
+/* Framebuffer
+ *
+ * The PicoLCD uses a Topway LCD module of 256x64 pixels.
+ * This display area is tiled over 4 controllers with 8 tiles
+ * each. Each tile has 8x64 pixels, each data byte representing
+ * a 1-bit wide vertical line of the tile.
+ *
+ * The display can be updated at a tile granularity.
+ *
+ *       Chip 1           Chip 2           Chip 3           Chip 4
+ * +----------------+----------------+----------------+----------------+
+ * |     Tile 1     |     Tile 1     |     Tile 1     |     Tile 1     |
+ * +----------------+----------------+----------------+----------------+
+ * |     Tile 2     |     Tile 2     |     Tile 2     |     Tile 2     |
+ * +----------------+----------------+----------------+----------------+
+ *                                  ...
+ * +----------------+----------------+----------------+----------------+
+ * |     Tile 8     |     Tile 8     |     Tile 8     |     Tile 8     |
+ * +----------------+----------------+----------------+----------------+
+ */
+#define PICOLCDFB_NAME "picolcdfb"
+#define PICOLCDFB_WIDTH (256)
+#define PICOLCDFB_HEIGHT (64)
+#define PICOLCDFB_SIZE (PICOLCDFB_WIDTH * PICOLCDFB_HEIGHT / 8)
+
+#define PICOLCDFB_UPDATE_RATE_LIMIT   10
+#define PICOLCDFB_UPDATE_RATE_DEFAULT  2
+
+/* Framebuffer visual structures */
+static const struct fb_fix_screeninfo picolcdfb_fix = {
+	.id          = PICOLCDFB_NAME,
+	.type        = FB_TYPE_PACKED_PIXELS,
+	.visual      = FB_VISUAL_MONO01,
+	.xpanstep    = 0,
+	.ypanstep    = 0,
+	.ywrapstep   = 0,
+	.line_length = PICOLCDFB_WIDTH / 8,
+	.accel       = FB_ACCEL_NONE,
+};
+
+static const struct fb_var_screeninfo picolcdfb_var = {
+	.xres           = PICOLCDFB_WIDTH,
+	.yres           = PICOLCDFB_HEIGHT,
+	.xres_virtual   = PICOLCDFB_WIDTH,
+	.yres_virtual   = PICOLCDFB_HEIGHT,
+	.width          = 103,
+	.height         = 26,
+	.bits_per_pixel = 1,
+	.grayscale      = 1,
+};
+#endif /* CONFIG_HID_PICOLCD_FB */
+
+/* Input device
+ *
+ * The PicoLCD has an IR receiver header, a built-in keypad with 5 keys
+ * and a header for a 4x4 key matrix. The built-in keys are part of the matrix.
+ */
+static const unsigned short def_keymap[] = {
+	KEY_RESERVED,	/* none */
+	KEY_BACK,	/* col 4 + row 1 */
+	KEY_HOMEPAGE,	/* col 3 + row 1 */
+	KEY_RESERVED,	/* col 2 + row 1 */
+	KEY_RESERVED,	/* col 1 + row 1 */
+	KEY_SCROLLUP,	/* col 4 + row 2 */
+	KEY_OK,		/* col 3 + row 2 */
+	KEY_SCROLLDOWN,	/* col 2 + row 2 */
+	KEY_RESERVED,	/* col 1 + row 2 */
+	KEY_RESERVED,	/* col 4 + row 3 */
+	KEY_RESERVED,	/* col 3 + row 3 */
+	KEY_RESERVED,	/* col 2 + row 3 */
+	KEY_RESERVED,	/* col 1 + row 3 */
+	KEY_RESERVED,	/* col 4 + row 4 */
+	KEY_RESERVED,	/* col 3 + row 4 */
+	KEY_RESERVED,	/* col 2 + row 4 */
+	KEY_RESERVED,	/* col 1 + row 4 */
+};
+#define PICOLCD_KEYS ARRAY_SIZE(def_keymap)
+
+/* Description of an in-progress IO operation, used for operations
+ * that trigger a response from the device */
+struct picolcd_pending {
+	struct hid_report *out_report;
+	struct hid_report *in_report;
+	struct completion ready;
+	int raw_size;
+	u8 raw_data[64];
+};
+
+/* Per device data structure */
+struct picolcd_data {
+	struct hid_device *hdev;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debug_reset;
+	struct dentry *debug_eeprom;
+	struct dentry *debug_flash;
+	struct mutex mutex_flash;
+	int addr_sz;
+#endif
+	u8 version[2];
+	unsigned short opmode_delay;
+	/* input stuff */
+	u8 pressed_keys[2];
+	struct input_dev *input_keys;
+	struct input_dev *input_cir;
+	unsigned short keycode[PICOLCD_KEYS];
+
+#ifdef CONFIG_HID_PICOLCD_FB
+	/* Framebuffer stuff */
+	u8 fb_update_rate;
+	u8 fb_bpp;
+	u8 *fb_vbitmap;		/* local copy of what was sent to PicoLCD */
+	u8 *fb_bitmap;		/* framebuffer */
+	struct fb_info *fb_info;
+	struct fb_deferred_io fb_defio;
+#endif /* CONFIG_HID_PICOLCD_FB */
+#ifdef CONFIG_HID_PICOLCD_LCD
+	struct lcd_device *lcd;
+	u8 lcd_contrast;
+#endif /* CONFIG_HID_PICOLCD_LCD */
+#ifdef CONFIG_HID_PICOLCD_BACKLIGHT
+	struct backlight_device *backlight;
+	u8 lcd_brightness;
+	u8 lcd_power;
+#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */
+#ifdef CONFIG_HID_PICOLCD_LEDS
+	/* LED stuff */
+	u8 led_state;
+	struct led_classdev *led[8];
+#endif /* CONFIG_HID_PICOLCD_LEDS */
+
+	/* Housekeeping stuff */
+	spinlock_t lock;
+	struct mutex mutex;
+	struct picolcd_pending *pending;
+	int status;
+#define PICOLCD_BOOTLOADER 1
+#define PICOLCD_FAILED 2
+#define PICOLCD_READY_FB 4
+};
+
+
+/* Find a given report */
+#define picolcd_in_report(id, dev) picolcd_report(id, dev, HID_INPUT_REPORT)
+#define picolcd_out_report(id, dev) picolcd_report(id, dev, HID_OUTPUT_REPORT)
+
+static struct hid_report *picolcd_report(int id, struct hid_device *hdev, int dir)
+{
+	struct list_head *feature_report_list = &hdev->report_enum[dir].report_list;
+	struct hid_report *report = NULL;
+
+	list_for_each_entry(report, feature_report_list, list) {
+		if (report->id == id)
+			return report;
+	}
+	dev_warn(&hdev->dev, "No report with id 0x%x found\n", id);
+	return NULL;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void picolcd_debug_out_report(struct picolcd_data *data,
+		struct hid_device *hdev, struct hid_report *report);
+#define usbhid_submit_report(a, b, c) \
+	do { \
+		picolcd_debug_out_report(hid_get_drvdata(a), a, b); \
+		usbhid_submit_report(a, b, c); \
+	} while (0)
+#endif
+
+/* Submit a report and wait for a reply from device - if device fades away
+ * or does not respond in time, return NULL */
+static struct picolcd_pending *picolcd_send_and_wait(struct hid_device *hdev,
+		int report_id, const u8 *raw_data, int size)
+{
+	struct picolcd_data *data = hid_get_drvdata(hdev);
+	struct picolcd_pending *work;
+	struct hid_report *report = picolcd_out_report(report_id, hdev);
+	unsigned long flags;
+	int i, j, k;
+
+	if (!report || !data)
+		return NULL;
+	if (data->status & PICOLCD_FAILED)
+		return NULL;
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
+	if (!work)
+		return NULL;
+
+	init_completion(&work->ready);
+	work->out_report = report;
+	work->in_report  = NULL;
+	work->raw_size   = 0;
+
+	mutex_lock(&data->mutex);
+	spin_lock_irqsave(&data->lock, flags);
+	for (i = k = 0; i < report->maxfield; i++)
+		for (j = 0; j < report->field[i]->report_count; j++) {
+			hid_set_field(report->field[i], j, k < size ? raw_data[k] : 0);
+			k++;
+		}
+	data->pending = work;
+	usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+	spin_unlock_irqrestore(&data->lock, flags);
+	wait_for_completion_interruptible_timeout(&work->ready, HZ*2);
+	spin_lock_irqsave(&data->lock, flags);
+	data->pending = NULL;
+	spin_unlock_irqrestore(&data->lock, flags);
+	mutex_unlock(&data->mutex);
+	return work;
+}
+
+#ifdef CONFIG_HID_PICOLCD_FB
+/* Send a given tile to PicoLCD */
+static int picolcd_fb_send_tile(struct hid_device *hdev, int chip, int tile)
+{
+	struct picolcd_data *data = hid_get_drvdata(hdev);
+	struct hid_report *report1 = picolcd_out_report(REPORT_LCD_CMD_DATA, hdev);
+	struct hid_report *report2 = picolcd_out_report(REPORT_LCD_DATA, hdev);
+	unsigned long flags;
+	u8 *tdata;
+	int i;
+
+	if (!report1 || report1->maxfield != 1 || !report2 || report2->maxfield != 1)
+		return -ENODEV;
+
+	spin_lock_irqsave(&data->lock, flags);
+	hid_set_field(report1->field[0],  0, chip << 2);
+	hid_set_field(report1->field[0],  1, 0x02);
+	hid_set_field(report1->field[0],  2, 0x00);
+	hid_set_field(report1->field[0],  3, 0x00);
+	hid_set_field(report1->field[0],  4, 0xb8 | tile);
+	hid_set_field(report1->field[0],  5, 0x00);
+	hid_set_field(report1->field[0],  6, 0x00);
+	hid_set_field(report1->field[0],  7, 0x40);
+	hid_set_field(report1->field[0],  8, 0x00);
+	hid_set_field(report1->field[0],  9, 0x00);
+	hid_set_field(report1->field[0], 10,   32);
+
+	hid_set_field(report2->field[0],  0, (chip << 2) | 0x01);
+	hid_set_field(report2->field[0],  1, 0x00);
+	hid_set_field(report2->field[0],  2, 0x00);
+	hid_set_field(report2->field[0],  3,   32);
+
+	tdata = data->fb_vbitmap + (tile * 4 + chip) * 64;
+	for (i = 0; i < 64; i++)
+		if (i < 32)
+			hid_set_field(report1->field[0], 11 + i, tdata[i]);
+		else
+			hid_set_field(report2->field[0], 4 + i - 32, tdata[i]);
+
+	usbhid_submit_report(data->hdev, report1, USB_DIR_OUT);
+	usbhid_submit_report(data->hdev, report2, USB_DIR_OUT);
+	spin_unlock_irqrestore(&data->lock, flags);
+	return 0;
+}
+
+/* Translate a single tile */
+static int picolcd_fb_update_tile(u8 *vbitmap, const u8 *bitmap, int bpp,
+		int chip, int tile)
+{
+	int i, b, changed = 0;
+	u8 tdata[64];
+	u8 *vdata = vbitmap + (tile * 4 + chip) * 64;
+
+	if (bpp == 1) {
+		for (b = 7; b >= 0; b--) {
+			const u8 *bdata = bitmap + tile * 256 + chip * 8 + b * 32;
+			for (i = 0; i < 64; i++) {
+				tdata[i] <<= 1;
+				tdata[i] |= (bdata[i/8] >> (7 - i % 8)) & 0x01;
+			}
+		}
+	} else if (bpp == 8) {
+		for (b = 7; b >= 0; b--) {
+			const u8 *bdata = bitmap + (tile * 256 + chip * 8 + b * 32) * 8;
+			for (i = 0; i < 64; i++) {
+				tdata[i] <<= 1;
+				tdata[i] |= (bdata[i] & 0x80) ? 0x01 : 0x00;
+			}
+		}
+	} else {
+		/* Oops, we should never get here! */
+		WARN_ON(1);
+		return 0;
+	}
+
+	for (i = 0; i < 64; i++)
+		if (tdata[i] != vdata[i]) {
+			changed = 1;
+			vdata[i] = tdata[i];
+		}
+	return changed;
+}
+
+/* Reconfigure LCD display */
+static int picolcd_fb_reset(struct picolcd_data *data, int clear)
+{
+	struct hid_report *report = picolcd_out_report(REPORT_LCD_CMD, data->hdev);
+	int i, j;
+	unsigned long flags;
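+	/* Command bytes sent to each of the 4 LCD chips during reset;
+	 * presumably the controller init/mapping sequence (values taken
+	 * as-is from the device protocol, not decoded here). */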
+	static const u8 mapcmd[8] = { 0x00, 0x02, 0x00, 0x64, 0x3f, 0x00, 0x64, 0xc0 };
+
+	if (!report || report->maxfield != 1)
+		return -ENODEV;
+
+	spin_lock_irqsave(&data->lock, flags);
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < report->field[0]->maxusage; j++)
+			if (j == 0)
+				hid_set_field(report->field[0], j, i << 2);
+			else if (j < sizeof(mapcmd))
+				hid_set_field(report->field[0], j, mapcmd[j]);
+			else
+				hid_set_field(report->field[0], j, 0);
+		usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+	}
+
+	data->status |= PICOLCD_READY_FB;
+	spin_unlock_irqrestore(&data->lock, flags);
+
+	if (data->fb_bitmap) {
+		if (clear) {
+			memset(data->fb_vbitmap, 0xff, PICOLCDFB_SIZE);
+			memset(data->fb_bitmap, 0, PICOLCDFB_SIZE*data->fb_bpp);
+		} else {
+			/* invert 1 byte in each tile to force resend */
+			for (i = 0; i < PICOLCDFB_SIZE; i += 64)
+				data->fb_vbitmap[i] = ~data->fb_vbitmap[i];
+		}
+	}
+
+	/* schedule first output of framebuffer */
+	if (data->fb_info)
+		schedule_delayed_work(&data->fb_info->deferred_work, 0);
+
+	return 0;
+}
+
+/* Update fb_vbitmap from the screen_base and send changed tiles to device */
+static void picolcd_fb_update(struct picolcd_data *data)
+{
+	int chip, tile, n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&data->lock, flags);
+	if (!(data->status & PICOLCD_READY_FB)) {
+		spin_unlock_irqrestore(&data->lock, flags);
+		picolcd_fb_reset(data, 0);
+	} else {
+		spin_unlock_irqrestore(&data->lock, flags);
+	}
+
+	/*
+	 * Translate the framebuffer into the format needed by the PicoLCD.
+	 * See display layout above.
+	 * Do this one tile after the other and push those tiles that changed.
+	 *
+	 * Wait for our IO to complete as otherwise we might flood the queue!
+	 */
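+	/* n counts queued output reports (two per tile); flush once half of
+	 * the usbhid output FIFO is in use. */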
+	n = 0;
+	for (chip = 0; chip < 4; chip++)
+		for (tile = 0; tile < 8; tile++)
+			if (picolcd_fb_update_tile(data->fb_vbitmap,
+					data->fb_bitmap, data->fb_bpp, chip, tile)) {
+				n += 2;
+				if (n >= HID_OUTPUT_FIFO_SIZE / 2) {
+					usbhid_wait_io(data->hdev);
+					n = 0;
+				}
+				picolcd_fb_send_tile(data->hdev, chip, tile);
+			}
+	if (n)
+		usbhid_wait_io(data->hdev);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_fillrect(struct fb_info *info,
+		const struct fb_fillrect *rect)
+{
+	if (!info->par)
+		return;
+	sys_fillrect(info, rect);
+
+	schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_copyarea(struct fb_info *info,
+		const struct fb_copyarea *area)
+{
+	if (!info->par)
+		return;
+	sys_copyarea(info, area);
+
+	schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	if (!info->par)
+		return;
+	sys_imageblit(info, image);
+
+	schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/*
+ * this is the slow path from userspace. they can seek and write to
+ * the fb. it's inefficient to do anything less than a full screen draw
+ */
+static ssize_t picolcd_fb_write(struct fb_info *info, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	ssize_t ret;
+	if (!info->par)
+		return -ENODEV;
+	ret = fb_sys_write(info, buf, count, ppos);
+	if (ret >= 0)
+		schedule_delayed_work(&info->deferred_work, 0);
+	return ret;
+}
+
+static int picolcd_fb_blank(int blank, struct fb_info *info)
+{
+	if (!info->par)
+		return -ENODEV;
+	/* We let fb notification do this for us via lcd/backlight device */
+	return 0;
+}
+
+static void picolcd_fb_destroy(struct fb_info *info)
+{
+	struct picolcd_data *data = info->par;
+	info->par = NULL;
+	if (data)
+		data->fb_info = NULL;
+	fb_deferred_io_cleanup(info);
+	framebuffer_release(info);
+}
+
+static int picolcd_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+	__u32 bpp      = var->bits_per_pixel;
+	__u32 activate = var->activate;
+
+	/* only allow 1/8 bit depth (8-bit is grayscale) */
+	*var = picolcdfb_var;
+	var->activate = activate;
+	if (bpp >= 8)
+		var->bits_per_pixel = 8;
+	else
+		var->bits_per_pixel = 1;
+	return 0;
+}
+
+static int picolcd_set_par(struct fb_info *info)
+{
+	struct picolcd_data *data = info->par;
+	u8 *o_fb, *n_fb;
+	if (info->var.bits_per_pixel == data->fb_bpp)
+		return 0;
+	/* switch between 1/8 bit depths */
+	if (info->var.bits_per_pixel != 1 && info->var.bits_per_pixel != 8)
+		return -EINVAL;
+
+	o_fb = data->fb_bitmap;
+	n_fb = vmalloc(PICOLCDFB_SIZE*info->var.bits_per_pixel);
+	if (!n_fb)
+		return -ENOMEM;
+
+	fb_deferred_io_cleanup(info);
+	/* translate FB content to new bits-per-pixel */
+	if (info->var.bits_per_pixel == 1) {
+		int i, b;
+		for (i = 0; i < PICOLCDFB_SIZE; i++) {
+			u8 p = 0;
+			for (b = 0; b < 8; b++) {
+				p <<= 1;
+				p |= o_fb[i*8+b] ? 0x01 : 0x00;
+			}
+			n_fb[i] = p;	/* store the packed byte in the new 1bpp buffer */
+		}
+		info->fix.visual = FB_VISUAL_MONO01;
+		info->fix.line_length = PICOLCDFB_WIDTH / 8;
+	} else {
+		int i;
+		for (i = 0; i < PICOLCDFB_SIZE * 8; i++)
+			n_fb[i] = o_fb[i/8] & (0x01 << (7 - i % 8)) ? 0xff : 0x00;
+		info->fix.visual = FB_VISUAL_TRUECOLOR;
+		info->fix.line_length = PICOLCDFB_WIDTH;
+	}
+
+	data->fb_bitmap   = n_fb;
+	data->fb_bpp      = info->var.bits_per_pixel;
+	info->screen_base = (char __force __iomem *)n_fb;
+	info->fix.smem_start = (unsigned long)n_fb;
+	info->fix.smem_len   = PICOLCDFB_SIZE*data->fb_bpp;
+	fb_deferred_io_init(info);
+	vfree(o_fb);
+	return 0;
+}
+
+/* Note this can't be const because of struct fb_info definition */
+static struct fb_ops picolcdfb_ops = {
+	.owner        = THIS_MODULE,
+	.fb_destroy   = picolcd_fb_destroy,
+	.fb_read      = fb_sys_read,
+	.fb_write     = picolcd_fb_write,
+	.fb_blank     = picolcd_fb_blank,
+	.fb_fillrect  = picolcd_fb_fillrect,
+	.fb_copyarea  = picolcd_fb_copyarea,
+	.fb_imageblit = picolcd_fb_imageblit,
+	.fb_check_var = picolcd_fb_check_var,
+	.fb_set_par   = picolcd_set_par,
+};
+
+
+/* Callback from deferred IO workqueue */
+static void picolcd_fb_deferred_io(struct fb_info *info, struct list_head *pagelist)
+{
+	picolcd_fb_update(info->par);
+}
+
+static const struct fb_deferred_io picolcd_fb_defio = {
+	.delay = HZ / PICOLCDFB_UPDATE_RATE_DEFAULT,
+	.deferred_io = picolcd_fb_deferred_io,
+};
+
+
+/*
+ * The "fb_update_rate" sysfs attribute
+ */
+static ssize_t picolcd_fb_update_rate_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct picolcd_data *data = dev_get_drvdata(dev);
+	unsigned i, fb_update_rate = data->fb_update_rate;
+	size_t ret = 0;
+
+	for (i = 1; i <= PICOLCDFB_UPDATE_RATE_LIMIT; i++)
+		if (ret >= PAGE_SIZE)
+			break;
+		else if (i == fb_update_rate)
+			ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i);
+		else
+			ret += snprintf(buf+ret, PAGE_SIZE-ret, "%u ", i);
+	if (ret > 0)
+		buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n';
+	return ret;
+}
+
+static ssize_t picolcd_fb_update_rate_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct picolcd_data *data = dev_get_drvdata(dev);
+	int i;
+	unsigned u;
+
+	if (count < 1 || count > 10)
+		return -EINVAL;
+
+	i = sscanf(buf, "%u", &u);
+	if (i != 1)
+		return -EINVAL;
+
+	if (u > PICOLCDFB_UPDATE_RATE_LIMIT)
+		return -ERANGE;
+	else if (u == 0)
+		u = PICOLCDFB_UPDATE_RATE_DEFAULT;
+
+	data->fb_update_rate = u;
+	data->fb_defio.delay = HZ / data->fb_update_rate;
+	return count;
+}
+
+static DEVICE_ATTR(fb_update_rate, 0666, picolcd_fb_update_rate_show,
+		picolcd_fb_update_rate_store);
+
+/* initialize Framebuffer device */
+static int picolcd_init_framebuffer(struct picolcd_data *data)
+{
+	struct device *dev = &data->hdev->dev;
+	struct fb_info *info = NULL;
+	int error = -ENOMEM;
+	u8 *fb_vbitmap = NULL;
+	u8 *fb_bitmap  = NULL;
+
+	fb_bitmap = vmalloc(PICOLCDFB_SIZE*picolcdfb_var.bits_per_pixel);
+	if (fb_bitmap == NULL) {
+		dev_err(dev, "can't allocate framebuffer memory\n");
+		goto err_nomem;
+	}
+
+	fb_vbitmap = kmalloc(PICOLCDFB_SIZE, GFP_KERNEL);
+	if (fb_vbitmap == NULL) {
+		dev_err(dev, "can't alloc vbitmap image buffer\n");
+		goto err_nomem;
+	}
+
+	data->fb_update_rate = PICOLCDFB_UPDATE_RATE_DEFAULT;
+	data->fb_defio = picolcd_fb_defio;
+	info = framebuffer_alloc(0, dev);
+	if (info == NULL) {
+		dev_err(dev, "failed to allocate a framebuffer\n");
+		goto err_nomem;
+	}
+
+	info->fbdefio = &data->fb_defio;
+	info->screen_base = (char __force __iomem *)fb_bitmap;
+	info->fbops = &picolcdfb_ops;
+	info->var = picolcdfb_var;
+	info->fix = picolcdfb_fix;
+	info->fix.smem_len   = PICOLCDFB_SIZE;
+	info->fix.smem_start = (unsigned long)fb_bitmap;
+	info->par = data;
+	info->flags = FBINFO_FLAG_DEFAULT;
+
+	data->fb_vbitmap = fb_vbitmap;
+	data->fb_bitmap  = fb_bitmap;
+	data->fb_bpp     = picolcdfb_var.bits_per_pixel;
+	error = picolcd_fb_reset(data, 1);
+	if (error) {
+		dev_err(dev, "failed to configure display\n");
+		goto err_cleanup;
+	}
+	error = device_create_file(dev, &dev_attr_fb_update_rate);
+	if (error) {
+		dev_err(dev, "failed to create sysfs attributes\n");
+		goto err_cleanup;
+	}
+	data->fb_info    = info;
+	error = register_framebuffer(info);
+	if (error) {
+		dev_err(dev, "failed to register framebuffer\n");
+		goto err_sysfs;
+	}
+	fb_deferred_io_init(info);
+	/* schedule first output of framebuffer */
+	schedule_delayed_work(&info->deferred_work, 0);
+	return 0;
+
+err_sysfs:
+	device_remove_file(dev, &dev_attr_fb_update_rate);
+err_cleanup:
+	data->fb_vbitmap = NULL;
+	data->fb_bitmap  = NULL;
+	data->fb_bpp     = 0;
+	data->fb_info    = NULL;
+
+err_nomem:
+	framebuffer_release(info);
+	vfree(fb_bitmap);
+	kfree(fb_vbitmap);
+	return error;
+}
+
+static void picolcd_exit_framebuffer(struct picolcd_data *data)
+{
+	struct fb_info *info = data->fb_info;
+	u8 *fb_vbitmap = data->fb_vbitmap;
+	u8 *fb_bitmap  = data->fb_bitmap;
+
+	if (!info)
+		return;
+
+	data->fb_vbitmap = NULL;
+	data->fb_bitmap  = NULL;
+	data->fb_bpp     = 0;
+	data->fb_info    = NULL;
+	device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate);
+	fb_deferred_io_cleanup(info);
+	unregister_framebuffer(info);
+	vfree(fb_bitmap);
+	kfree(fb_vbitmap);
+}
+
+#define picolcd_fbinfo(d) ((d)->fb_info)
+#else
+static inline int picolcd_fb_reset(struct picolcd_data *data, int clear)
+{
+	return 0;
+}
+static inline int picolcd_init_framebuffer(struct picolcd_data *data)
+{
+	return 0;
+}
+static inline void picolcd_exit_framebuffer(struct picolcd_data *data)
+{
+}
+#define picolcd_fbinfo(d) NULL
+#endif /* CONFIG_HID_PICOLCD_FB */
+
+#ifdef CONFIG_HID_PICOLCD_BACKLIGHT
+/*
+ * backlight class device
+ */
+static int picolcd_get_brightness(struct backlight_device *bdev)
+{
+	struct picolcd_data *data = bl_get_data(bdev);
+	return data->lcd_brightness;
+}
+
+static int picolcd_set_brightness(struct backlight_device *bdev)
+{
+	struct picolcd_data *data = bl_get_data(bdev);
+	struct hid_report *report = picolcd_out_report(REPORT_BRIGHTNESS, data->hdev);
+	unsigned long flags;
+
+	if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
+		return -ENODEV;
+
+	data->lcd_brightness = bdev->props.brightness & 0x0ff;
+	data->lcd_power      = bdev->props.power;
+	spin_lock_irqsave(&data->lock, flags);
+	hid_set_field(report->field[0], 0, data->lcd_power == FB_BLANK_UNBLANK ? data->lcd_brightness : 0);
+	usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+	spin_unlock_irqrestore(&data->lock, flags);
+	return 0;
+}
+
+static int picolcd_check_bl_fb(struct backlight_device *bdev, struct fb_info *fb)
+{
+	return fb && fb == picolcd_fbinfo((struct picolcd_data *)bl_get_data(bdev));
+}
+
+static const struct backlight_ops picolcd_blops = {
+	.update_status  = picolcd_set_brightness,
+	.get_brightness = picolcd_get_brightness,
+	.check_fb       = picolcd_check_bl_fb,
+};
+
+static int picolcd_init_backlight(struct picolcd_data *data, struct hid_report *report)
+{
+	struct device *dev = &data->hdev->dev;
+	struct backlight_device *bdev;
+	struct backlight_properties props;
+	if (!report)
+		return -ENODEV;
+	if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
+			report->field[0]->report_size != 8) {
+		dev_err(dev, "unsupported BRIGHTNESS report\n");
+		return -EINVAL;
+	}
+
+	memset(&props, 0, sizeof(props));
+	props.max_brightness = 0xff;
+	bdev = backlight_device_register(dev_name(dev), dev, data,
+			&picolcd_blops, &props);
+	if (IS_ERR(bdev)) {
+		dev_err(dev, "failed to register backlight\n");
+		return PTR_ERR(bdev);
+	}
+	bdev->props.brightness     = 0xff;
+	data->lcd_brightness       = 0xff;
+	data->backlight            = bdev;
+	picolcd_set_brightness(bdev);
+	return 0;
+}
+
+static void picolcd_exit_backlight(struct picolcd_data *data)
+{
+	struct backlight_device *bdev = data->backlight;
+
+	data->backlight = NULL;
+	if (bdev)
+		backlight_device_unregister(bdev);
+}
+
+static inline int picolcd_resume_backlight(struct picolcd_data *data)
+{
+	if (!data->backlight)
+		return 0;
+	return picolcd_set_brightness(data->backlight);
+}
+
+#ifdef CONFIG_PM
+static void picolcd_suspend_backlight(struct picolcd_data *data)
+{
+	int bl_power = data->lcd_power;
+	if (!data->backlight)
+		return;
+
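+	/* Force the panel off for suspend, then restore the logical power
+	 * state so a later resume re-applies the previous brightness. */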
+	data->backlight->props.power = FB_BLANK_POWERDOWN;
+	picolcd_set_brightness(data->backlight);
+	data->lcd_power = data->backlight->props.power = bl_power;
+}
+#endif /* CONFIG_PM */
+#else
+static inline int picolcd_init_backlight(struct picolcd_data *data,
+		struct hid_report *report)
+{
+	return 0;
+}
+static inline void picolcd_exit_backlight(struct picolcd_data *data)
+{
+}
+static inline int picolcd_resume_backlight(struct picolcd_data *data)
+{
+	return 0;
+}
+static inline void picolcd_suspend_backlight(struct picolcd_data *data)
+{
+}
+#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */
+
+#ifdef CONFIG_HID_PICOLCD_LCD
+/*
+ * lcd class device
+ */
+static int picolcd_get_contrast(struct lcd_device *ldev)
+{
+	struct picolcd_data *data = lcd_get_data(ldev);
+	return data->lcd_contrast;
+}
+
+static int picolcd_set_contrast(struct lcd_device *ldev, int contrast)
+{
+	struct picolcd_data *data = lcd_get_data(ldev);
+	struct hid_report *report = picolcd_out_report(REPORT_CONTRAST, data->hdev);
+	unsigned long flags;
+
+	if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
+		return -ENODEV;
+
+	data->lcd_contrast = contrast & 0x0ff;
+	spin_lock_irqsave(&data->lock, flags);
+	hid_set_field(report->field[0], 0, data->lcd_contrast);
+	usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+	spin_unlock_irqrestore(&data->lock, flags);
+	return 0;
+}
+
+static int picolcd_check_lcd_fb(struct lcd_device *ldev, struct fb_info *fb)
+{
+	return fb && fb == picolcd_fbinfo((struct picolcd_data *)lcd_get_data(ldev));
+}
+
+static struct lcd_ops picolcd_lcdops = {
+	.get_contrast   = picolcd_get_contrast,
+	.set_contrast   = picolcd_set_contrast,
+	.check_fb       = picolcd_check_lcd_fb,
+};
+
+static int picolcd_init_lcd(struct picolcd_data *data, struct hid_report *report)
+{
+	struct device *dev = &data->hdev->dev;
+	struct lcd_device *ldev;
+
+	if (!report)
+		return -ENODEV;
+	if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
+			report->field[0]->report_size != 8) {
+		dev_err(dev, "unsupported CONTRAST report\n");
+		return -EINVAL;
+	}
+
+	ldev = lcd_device_register(dev_name(dev), dev, data, &picolcd_lcdops);
+	if (IS_ERR(ldev)) {
+		dev_err(dev, "failed to register LCD\n");
+		return PTR_ERR(ldev);
+	}
+	ldev->props.max_contrast = 0x0ff;
+	data->lcd_contrast = 0xe5;
+	data->lcd = ldev;
+	picolcd_set_contrast(ldev, 0xe5);
+	return 0;
+}
+
+static void picolcd_exit_lcd(struct picolcd_data *data)
+{
+	struct lcd_device *ldev = data->lcd;
+
+	data->lcd = NULL;
+	if (ldev)
+		lcd_device_unregister(ldev);
+}
+
+static inline int picolcd_resume_lcd(struct picolcd_data *data)
+{
+	if (!data->lcd)
+		return 0;
+	return picolcd_set_contrast(data->lcd, data->lcd_contrast);
+}
+#else
+static inline int picolcd_init_lcd(struct picolcd_data *data,
+		struct hid_report *report)
+{
+	return 0;
+}
+static inline void picolcd_exit_lcd(struct picolcd_data *data)
+{
+}
+static inline int picolcd_resume_lcd(struct picolcd_data *data)
+{
+	return 0;
+}
+#endif /* CONFIG_HID_PICOLCD_LCD */
+
+#ifdef CONFIG_HID_PICOLCD_LEDS
+/*
+ * LED class device
+ */
+static void picolcd_leds_set(struct picolcd_data *data)
+{
+	struct hid_report *report;
+	unsigned long flags;
+
+	if (!data->led[0])
+		return;
+	report = picolcd_out_report(REPORT_LED_STATE, data->hdev);
+	if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
+		return;
+
+	spin_lock_irqsave(&data->lock, flags);
+	hid_set_field(report->field[0], 0, data->led_state);
+	usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+	spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void picolcd_led_set_brightness(struct led_classdev *led_cdev,
+			enum led_brightness value)
+{
+	struct device *dev;
+	struct hid_device *hdev;
+	struct picolcd_data *data;
+	int i, state = 0;
+
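+	/* The LED classdev is a child of the HID device, so walk up one
+	 * level to recover the picolcd_data via its drvdata. */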
+	dev  = led_cdev->dev->parent;
+	hdev = container_of(dev, struct hid_device, dev);
+	data = hid_get_drvdata(hdev);
+	for (i = 0; i < 8; i++) {
+		if (led_cdev != data->led[i])
+			continue;
+		state = (data->led_state >> i) & 1;
+		if (value == LED_OFF && state) {
+			data->led_state &= ~(1 << i);
+			picolcd_leds_set(data);
+		} else if (value != LED_OFF && !state) {
+			data->led_state |= 1 << i;
+			picolcd_leds_set(data);
+		}
+		break;
+	}
+}
+
+static enum led_brightness picolcd_led_get_brightness(struct led_classdev *led_cdev)
+{
+	struct device *dev;
+	struct hid_device *hdev;
+	struct picolcd_data *data;
+	int i, value = 0;
+
+	dev  = led_cdev->dev->parent;
+	hdev = container_of(dev, struct hid_device, dev);
+	data = hid_get_drvdata(hdev);
+	for (i = 0; i < 8; i++)
+		if (led_cdev == data->led[i]) {
+			value = (data->led_state >> i) & 1;
+			break;
+		}
+	return value ? LED_FULL : LED_OFF;
+}
+
+static int picolcd_init_leds(struct picolcd_data *data, struct hid_report *report)
+{
+	struct device *dev = &data->hdev->dev;
+	struct led_classdev *led;
+	size_t name_sz = strlen(dev_name(dev)) + 8;
+	char *name;
+	int i, ret = 0;
+
+	if (!report)
+		return -ENODEV;
+	if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
+			report->field[0]->report_size != 8) {
+		dev_err(dev, "unsupported LED_STATE report\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < 8; i++) {
+		led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+		if (!led) {
+			dev_err(dev, "can't allocate memory for LED %d\n", i);
+			ret = -ENOMEM;
+			goto err;
+		}
+		name = (void *)(&led[1]);
+		snprintf(name, name_sz, "%s::GPO%d", dev_name(dev), i);
+		led->name = name;
+		led->brightness = 0;
+		led->max_brightness = 1;
+		led->brightness_get = picolcd_led_get_brightness;
+		led->brightness_set = picolcd_led_set_brightness;
+
+		data->led[i] = led;
+		ret = led_classdev_register(dev, data->led[i]);
+		if (ret) {
+			data->led[i] = NULL;
+			kfree(led);
+			dev_err(dev, "can't register LED %d\n", i);
+			goto err;
+		}
+	}
+	return 0;
+err:
+	for (i = 0; i < 8; i++)
+		if (data->led[i]) {
+			led = data->led[i];
+			data->led[i] = NULL;
+			led_classdev_unregister(led);
+			kfree(led);
+		}
+	return ret;
+}
+
+static void picolcd_exit_leds(struct picolcd_data *data)
+{
+	struct led_classdev *led;
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		led = data->led[i];
+		data->led[i] = NULL;
+		if (!led)
+			continue;
+		led_classdev_unregister(led);
+		kfree(led);
+	}
+}
+
+#else
+static inline int picolcd_init_leds(struct picolcd_data *data,
+		struct hid_report *report)
+{
+	return 0;
+}
+static inline void picolcd_exit_leds(struct picolcd_data *data)
+{
+}
+static inline void picolcd_leds_set(struct picolcd_data *data)
+{
+}
+#endif /* CONFIG_HID_PICOLCD_LEDS */
+
+/*
+ * input class device
+ */
+static int picolcd_raw_keypad(struct picolcd_data *data,
+		struct hid_report *report, u8 *raw_data, int size)
+{
+	/*
+	 * Keypad event
+	 * First and second data bytes list currently pressed keys,
+	 * 0x00 means no key and at most 2 keys may be pressed at same time
+	 */
+	int i, j;
+
+	/* determine newly pressed keys */
+	for (i = 0; i < size; i++) {
+		unsigned int key_code;
+		if (raw_data[i] == 0)
+			continue;
+		for (j = 0; j < sizeof(data->pressed_keys); j++)
+			if (data->pressed_keys[j] == raw_data[i])
+				goto key_already_down;
+		for (j = 0; j < sizeof(data->pressed_keys); j++)
+			if (data->pressed_keys[j] == 0) {
+				data->pressed_keys[j] = raw_data[i];
+				break;
+			}
+		input_event(data->input_keys, EV_MSC, MSC_SCAN, raw_data[i]);
+		if (raw_data[i] < PICOLCD_KEYS)
+			key_code = data->keycode[raw_data[i]];
+		else
+			key_code = KEY_UNKNOWN;
+		if (key_code != KEY_UNKNOWN) {
+			dbg_hid(PICOLCD_NAME " got key press for %u:%d",
+					raw_data[i], key_code);
+			input_report_key(data->input_keys, key_code, 1);
+		}
+		input_sync(data->input_keys);
+key_already_down:
+		continue;
+	}
+
+	/* determine newly released keys */
+	for (j = 0; j < sizeof(data->pressed_keys); j++) {
+		unsigned int key_code;
+		if (data->pressed_keys[j] == 0)
+			continue;
+		for (i = 0; i < size; i++)
+			if (data->pressed_keys[j] == raw_data[i])
+				goto key_still_down;
+		input_event(data->input_keys, EV_MSC, MSC_SCAN, data->pressed_keys[j]);
+		if (data->pressed_keys[j] < PICOLCD_KEYS)
+			key_code = data->keycode[data->pressed_keys[j]];
+		else
+			key_code = KEY_UNKNOWN;
+		if (key_code != KEY_UNKNOWN) {
+			dbg_hid(PICOLCD_NAME " got key release for %u:%d",
+					data->pressed_keys[j], key_code);
+			input_report_key(data->input_keys, key_code, 0);
+		}
+		input_sync(data->input_keys);
+		data->pressed_keys[j] = 0;
+key_still_down:
+		continue;
+	}
+	return 1;
+}
+
+static int picolcd_raw_cir(struct picolcd_data *data,
+		struct hid_report *report, u8 *raw_data, int size)
+{
+	/* Need understanding of CIR data format to implement ... */
+	return 1;
+}
+
+static int picolcd_check_version(struct hid_device *hdev)
+{
+	struct picolcd_data *data = hid_get_drvdata(hdev);
+	struct picolcd_pending *verinfo;
+	int ret = 0;
+
+	if (!data)
+		return -ENODEV;
+
+	verinfo = picolcd_send_and_wait(hdev, REPORT_VERSION, NULL, 0);
+	if (!verinfo) {
+		dev_err(&hdev->dev, "no version response from PicoLCD\n");
+		return -ENODEV;
+	}
+
+	if (verinfo->raw_size == 2) {
+		data->version[0] = verinfo->raw_data[1];
+		data->version[1] = verinfo->raw_data[0];
+		if (data->status & PICOLCD_BOOTLOADER) {
+			dev_info(&hdev->dev, "PicoLCD, bootloader version %d.%d\n",
+					verinfo->raw_data[1], verinfo->raw_data[0]);
+		} else {
+			dev_info(&hdev->dev, "PicoLCD, firmware version %d.%d\n",
+					verinfo->raw_data[1], verinfo->raw_data[0]);
+		}
+	} else {
+		dev_err(&hdev->dev, "confused, got unexpected version response from PicoLCD\n");
+		ret = -EINVAL;
+	}
+	kfree(verinfo);
+	return ret;
+}
+
+/*
+ * Reset our device and wait for answer to VERSION request
+ */
+static int picolcd_reset(struct hid_device *hdev)
+{
+	struct picolcd_data *data = hid_get_drvdata(hdev);
+	struct hid_report *report = picolcd_out_report(REPORT_RESET, hdev);
+	unsigned long flags;
+	int error;
+
+	if (!data || !report || report->maxfield != 1)
+		return -ENODEV;
+
+	spin_lock_irqsave(&data->lock, flags);
+	if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER)
+		data->status |= PICOLCD_BOOTLOADER;
+
+	/* perform the reset */
+	hid_set_field(report->field[0], 0, 1);
+	usbhid_submit_report(hdev, report, USB_DIR_OUT);
+	spin_unlock_irqrestore(&data->lock, flags);
+
+	error = picolcd_check_version(hdev);
+	if (error)
+		return error;
+
+	picolcd_resume_lcd(data);
+	picolcd_resume_backlight(data);
+#ifdef CONFIG_HID_PICOLCD_FB
+	if (data->fb_info)
+		schedule_delayed_work(&data->fb_info->deferred_work, 0);
+#endif /* CONFIG_HID_PICOLCD_FB */
+
+	picolcd_leds_set(data);
+	return 0;
+}
+
+/*
+ * The "operation_mode" sysfs attribute
+ */
+static ssize_t picolcd_operation_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct picolcd_data *data = dev_get_drvdata(dev);
+
+	if (data->status & PICOLCD_BOOTLOADER)
+		return snprintf(buf, PAGE_SIZE, "[bootloader] lcd\n");
+	else
+		return snprintf(buf, PAGE_SIZE, "bootloader [lcd]\n");
+}
+
+static ssize_t picolcd_operation_mode_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct picolcd_data *data = dev_get_drvdata(dev);
+	struct hid_report *report = NULL;
+	size_t cnt = count;
+	int timeout = data->opmode_delay;
+	unsigned long flags;
+
+	if (cnt >= 3 && strncmp("lcd", buf, 3) == 0) {
+		if (data->status & PICOLCD_BOOTLOADER)
+			report = picolcd_out_report(REPORT_EXIT_FLASHER, data->hdev);
+		buf += 3;
+		cnt -= 3;
+	} else if (cnt >= 10 && strncmp("bootloader", buf, 10) == 0) {
+		if (!(data->status & PICOLCD_BOOTLOADER))
+			report = picolcd_out_report(REPORT_EXIT_KEYBOARD, data->hdev);
+		buf += 10;
+		cnt -= 10;
+	}
+	if (!report)
+		return -EINVAL;
+
+	while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
+		cnt--;
+	if (cnt != 0)
+		return -EINVAL;
+
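+	/* Payload of EXIT_KEYBOARD/EXIT_FLASHER is the restart delay in ms,
+	 * transmitted as a little-endian 16-bit value. */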
+	spin_lock_irqsave(&data->lock, flags);
+	hid_set_field(report->field[0], 0, timeout & 0xff);
+	hid_set_field(report->field[0], 1, (timeout >> 8) & 0xff);
+	usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+	spin_unlock_irqrestore(&data->lock, flags);
+	return count;
+}
+
+static DEVICE_ATTR(operation_mode, 0644, picolcd_operation_mode_show,
+		picolcd_operation_mode_store);
+
+/*
+ * The "operation_mode_delay" sysfs attribute
+ */
+static ssize_t picolcd_operation_mode_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct picolcd_data *data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%hu\n", data->opmode_delay);
+}
+
+static ssize_t picolcd_operation_mode_delay_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct picolcd_data *data = dev_get_drvdata(dev);
+	unsigned u;
+	if (sscanf(buf, "%u", &u) != 1)
+		return -EINVAL;
+	if (u > 30000)
+		return -EINVAL;
+	else
+		data->opmode_delay = u;
+	return count;
+}
+
+static DEVICE_ATTR(operation_mode_delay, 0644, picolcd_operation_mode_delay_show,
+		picolcd_operation_mode_delay_store);
+
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * The "reset" file
+ */
+static int picolcd_debug_reset_show(struct seq_file *f, void *p)
+{
+	if (picolcd_fbinfo((struct picolcd_data *)f->private))
+		seq_printf(f, "all fb\n");
+	else
+		seq_printf(f, "all\n");
+	return 0;
+}
+
+static int picolcd_debug_reset_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, picolcd_debug_reset_show, inode->i_private);
+}
+
+static ssize_t picolcd_debug_reset_write(struct file *f, const char __user *user_buf,
+		size_t count, loff_t *ppos)
+{
+	struct picolcd_data *data = ((struct seq_file *)f->private_data)->private;
+	char buf[32];
+	size_t cnt = min(count, sizeof(buf)-1);
+	if (copy_from_user(buf, user_buf, cnt))
+		return -EFAULT;
+
+	while (cnt > 0 && (buf[cnt-1] == ' ' || buf[cnt-1] == '\n'))
+		cnt--;
+	buf[cnt] = '\0';
+	if (strcmp(buf, "all") == 0) {
+		picolcd_reset(data->hdev);
+		picolcd_fb_reset(data, 1);
+	} else if (strcmp(buf, "fb") == 0) {
+		picolcd_fb_reset(data, 1);
+	} else {
+		return -EINVAL;
+	}
+	return count;
+}
+
+static const struct file_operations picolcd_debug_reset_fops = {
+	.owner    = THIS_MODULE,
+	.open     = picolcd_debug_reset_open,
+	.read     = seq_read,
+	.llseek   = seq_lseek,
+	.write    = picolcd_debug_reset_write,
+	.release  = single_release,
+};
+
+/*
+ * The "eeprom" file
+ */
+static int picolcd_debug_eeprom_open(struct inode *i, struct file *f)
+{
+	f->private_data = i->i_private;
+	return 0;
+}
+
+static ssize_t picolcd_debug_eeprom_read(struct file *f, char __user *u,
+		size_t s, loff_t *off)
+{
+	struct picolcd_data *data = f->private_data;
+	struct picolcd_pending *resp;
+	u8 raw_data[3];
+	ssize_t ret = -EIO;
+
+	if (s == 0)
+		return -EINVAL;
+	if (*off > 0x0ff)
+		return 0;
+
+	/* prepare buffer with info about what we want to read (addr & len) */
+	raw_data[0] = *off & 0xff;
+	raw_data[1] = (*off >> 8) & 0xff;
+	raw_data[2] = s < 20 ? s : 20;
+	if (*off + raw_data[2] > 0xff)
+		raw_data[2] = 0x100 - *off;
+	resp = picolcd_send_and_wait(data->hdev, REPORT_EE_READ, raw_data,
+			sizeof(raw_data));
+	if (!resp)
+		return -EIO;
+
+	if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) {
+		/* successful read :) */
+		ret = resp->raw_data[2];
+		if (ret > s)
+			ret = s;
+		if (copy_to_user(u, resp->raw_data+3, ret))
+			ret = -EFAULT;
+		else
+			*off += ret;
+	} /* anything else is some kind of IO error */
+
+	kfree(resp);
+	return ret;
+}
+
+static ssize_t picolcd_debug_eeprom_write(struct file *f, const char __user *u,
+		size_t s, loff_t *off)
+{
+	struct picolcd_data *data = f->private_data;
+	struct picolcd_pending *resp;
+	ssize_t ret = -EIO;
+	u8 raw_data[23];
+
+	if (s == 0)
+		return -EINVAL;
+	if (*off > 0x0ff)
+		return -ENOSPC;
+
+	memset(raw_data, 0, sizeof(raw_data));
+	raw_data[0] = *off & 0xff;
+	raw_data[1] = (*off >> 8) & 0xff;
+	raw_data[2] = s < 20 ? s : 20;
+	if (*off + raw_data[2] > 0xff)
+		raw_data[2] = 0x100 - *off;
+
+	if (copy_from_user(raw_data+3, u, raw_data[2]))
+		return -EFAULT;
+	resp = picolcd_send_and_wait(data->hdev, REPORT_EE_WRITE, raw_data,
+			sizeof(raw_data));
+
+	if (!resp)
+		return -EIO;
+
+	if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) {
+		/* check if written data matches */
+		if (memcmp(raw_data, resp->raw_data, 3+raw_data[2]) == 0) {
+			*off += raw_data[2];
+			ret = raw_data[2];
+		}
+	}
+	kfree(resp);
+	return ret;
+}
+
+/*
+ * Notes:
+ * - read/write happens in chunks of at most 20 bytes, it's up to userspace
+ *   to loop in order to get more data.
+ * - if a write error occurs on an otherwise correct write request, the
+ *   bytes that should have been written are left in an undefined state.
+ */
+static const struct file_operations picolcd_debug_eeprom_fops = {
+	.owner    = THIS_MODULE,
+	.open     = picolcd_debug_eeprom_open,
+	.read     = picolcd_debug_eeprom_read,
+	.write    = picolcd_debug_eeprom_write,
+	.llseek   = generic_file_llseek,
+};
+
+/*
+ * The "flash" file
+ */
+static int picolcd_debug_flash_open(struct inode *i, struct file *f)
+{
+	f->private_data = i->i_private;
+	return 0;
+}
+
+/* record a flash address to buf (bounds check to be done by caller) */
+static int _picolcd_flash_setaddr(struct picolcd_data *data, u8 *buf, long off)
+{
+	buf[0] = off & 0xff;
+	buf[1] = (off >> 8) & 0xff;
+	if (data->addr_sz == 3)
+		buf[2] = (off >> 16) & 0xff;
+	return data->addr_sz == 2 ? 2 : 3;
+}
+
+/* read a given size of data (bounds check to be done by caller) */
+static ssize_t _picolcd_flash_read(struct picolcd_data *data, int report_id,
+		char __user *u, size_t s, loff_t *off)
+{
+	struct picolcd_pending *resp;
+	u8 raw_data[4];
+	ssize_t ret = 0;
+	int len_off, err = -EIO;
+
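+	/* Read in chunks of at most 32 bytes; the device is expected to echo
+	 * the address/length prefix before the payload, which is verified
+	 * before the data is copied to userspace. */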
+	while (s > 0) {
+		err = -EIO;
+		len_off = _picolcd_flash_setaddr(data, raw_data, *off);
+		raw_data[len_off] = s > 32 ? 32 : s;
+		resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off+1);
+		if (!resp || !resp->in_report)
+			goto skip;
+		if (resp->in_report->id == REPORT_MEMORY ||
+			resp->in_report->id == REPORT_BL_READ_MEMORY) {
+			if (memcmp(raw_data, resp->raw_data, len_off+1) != 0)
+				goto skip;
+			if (copy_to_user(u+ret, resp->raw_data+len_off+1, raw_data[len_off])) {
+				err = -EFAULT;
+				goto skip;
+			}
+			*off += raw_data[len_off];
+			s    -= raw_data[len_off];
+			ret  += raw_data[len_off];
+			err   = 0;
+		}
+skip:
+		kfree(resp);
+		if (err)
+			return ret > 0 ? ret : err;
+	}
+	return ret;
+}
+
+static ssize_t picolcd_debug_flash_read(struct file *f, char __user *u,
+		size_t s, loff_t *off)
+{
+	struct picolcd_data *data = f->private_data;
+
+	if (s == 0)
+		return -EINVAL;
+	if (*off > 0x05fff)
+		return 0;
+	if (*off + s > 0x05fff)
+		s = 0x06000 - *off;
+
+	if (data->status & PICOLCD_BOOTLOADER)
+		return _picolcd_flash_read(data, REPORT_BL_READ_MEMORY, u, s, off);
+	else
+		return _picolcd_flash_read(data, REPORT_READ_MEMORY, u, s, off);
+}
+
+/* erase a block aligned to a 64-byte boundary */
+static ssize_t _picolcd_flash_erase64(struct picolcd_data *data, int report_id,
+		loff_t *off)
+{
+	struct picolcd_pending *resp;
+	u8 raw_data[3];
+	int len_off;
+	ssize_t ret = -EIO;
+
+	if (*off & 0x3f)
+		return -EINVAL;
+
+	len_off = _picolcd_flash_setaddr(data, raw_data, *off);
+	resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off);
+	if (!resp || !resp->in_report)
+		goto skip;
+	if (resp->in_report->id == REPORT_MEMORY ||
+		resp->in_report->id == REPORT_BL_ERASE_MEMORY) {
+		if (memcmp(raw_data, resp->raw_data, len_off) != 0)
+			goto skip;
+		ret = 0;
+	}
+skip:
+	kfree(resp);
+	return ret;
+}
+
+/* write a given size of data (bounds check to be done by caller) */
+static ssize_t _picolcd_flash_write(struct picolcd_data *data, int report_id,
+		const char __user *u, size_t s, loff_t *off)
+{
+	struct picolcd_pending *resp;
+	u8 raw_data[36];
+	ssize_t ret = 0;
+	int len_off, err = -EIO;
+
+	while (s > 0) {
+		err = -EIO;
+		len_off = _picolcd_flash_setaddr(data, raw_data, *off);
+		raw_data[len_off] = s > 32 ? 32 : s;
+		if (copy_from_user(raw_data+len_off+1, u+ret, raw_data[len_off])) {
+			err = -EFAULT;
+			break;
+		}
+		resp = picolcd_send_and_wait(data->hdev, report_id, raw_data,
+				len_off+1+raw_data[len_off]);
+		if (!resp || !resp->in_report)
+			goto skip;
+		if (resp->in_report->id == REPORT_MEMORY ||
+			resp->in_report->id == REPORT_BL_WRITE_MEMORY) {
+			if (memcmp(raw_data, resp->raw_data, len_off+1+raw_data[len_off]) != 0)
+				goto skip;
+			*off += raw_data[len_off];
+			s    -= raw_data[len_off];
+			ret  += raw_data[len_off];
+			err   = 0;
+		}
+skip:
+		kfree(resp);
+		if (err)
+			break;
+	}
+	return ret > 0 ? ret : err;
+}
+
+static ssize_t picolcd_debug_flash_write(struct file *f, const char __user *u,
+		size_t s, loff_t *off)
+{
+	struct picolcd_data *data = f->private_data;
+	ssize_t err, ret = 0;
+	int report_erase, report_write;
+
+	if (s == 0)
+		return -EINVAL;
+	if (*off > 0x5fff)
+		return -ENOSPC;
+	if (s & 0x3f)
+		return -EINVAL;
+	if (*off & 0x3f)
+		return -EINVAL;
+
+	if (data->status & PICOLCD_BOOTLOADER) {
+		report_erase = REPORT_BL_ERASE_MEMORY;
+		report_write = REPORT_BL_WRITE_MEMORY;
+	} else {
+		report_erase = REPORT_ERASE_MEMORY;
+		report_write = REPORT_WRITE_MEMORY;
+	}
+	mutex_lock(&data->mutex_flash);
+	while (s > 0) {
+		err = _picolcd_flash_erase64(data, report_erase, off);
+		if (err)
+			break;
+		err = _picolcd_flash_write(data, report_write, u, 64, off);
+		if (err < 0)
+			break;
+		/* _picolcd_flash_write() has already advanced *off */
+		ret += err;
+		u += err;
+		s -= err;
+		if (err != 64)
+			break;
+	}
+	mutex_unlock(&data->mutex_flash);
+	return ret > 0 ? ret : err;
+}
+
+/*
+ * Notes:
+ * - concurrent writing is prevented by a mutex; all writes must be
+ *   n*64 bytes and 64-byte aligned, each write being preceded by an
+ *   ERASE of the corresponding 64-byte block.
+ *   If less than requested was written, or an error is returned for an
+ *   otherwise correct write request, the next 64-byte block which should
+ *   have been written is in an undefined state (mostly: original, erased,
+ *   or partially written with a write error)
+ * - reading can happen without special restriction
+ */
+static const struct file_operations picolcd_debug_flash_fops = {
+	.owner    = THIS_MODULE,
+	.open     = picolcd_debug_flash_open,
+	.read     = picolcd_debug_flash_read,
+	.write    = picolcd_debug_flash_write,
+	.llseek   = generic_file_llseek,
+};
+
+
+/*
+ * Helper code for HID report level dumping/debugging
+ */
+static const char *error_codes[] = {
+	"success", "parameter missing", "data_missing", "block readonly",
+	"block not erasable", "block too big", "section overflow",
+	"invalid command length", "invalid data length",
+};
+
+static void dump_buff_as_hex(char *dst, size_t dst_sz, const u8 *data,
+		const size_t data_len)
+{
+	int i, j;
+	for (i = j = 0; i < data_len && j + 3 < dst_sz; i++) {
+		dst[j++] = hex_asc[(data[i] >> 4) & 0x0f];
+		dst[j++] = hex_asc[data[i] & 0x0f];
+		dst[j++] = ' ';
+	}
+	if (j < dst_sz) {
+		dst[j--] = '\0';
+		dst[j] = '\n';
+	} else
+		dst[j] = '\0';
+}
+
+static void picolcd_debug_out_report(struct picolcd_data *data,
+		struct hid_device *hdev, struct hid_report *report)
+{
+	u8 raw_data[70];
+	int raw_size = (report->size >> 3) + 1;
+	char *buff;
+#define BUFF_SZ 256
+
+	/* Avoid unnecessary overhead if debugfs is disabled */
+	if (!hdev->debug_events)
+		return;
+
+	buff = kmalloc(BUFF_SZ, GFP_ATOMIC);
+	if (!buff)
+		return;
+
+	snprintf(buff, BUFF_SZ, "\nout report %d (size %d) =  ",
+			report->id, raw_size);
+	hid_debug_event(hdev, buff);
+	if (raw_size + 5 > sizeof(raw_data)) {
+		hid_debug_event(hdev, " TOO BIG\n");
+		kfree(buff);
+		return;
+	} else {
+		raw_data[0] = report->id;
+		hid_output_report(report, raw_data);
+		dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size);
+		hid_debug_event(hdev, buff);
+	}
+
+	switch (report->id) {
+	case REPORT_LED_STATE:
+		/* 1 data byte with GPO state */
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_LED_STATE", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tGPO state: 0x%02x\n", raw_data[1]);
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_BRIGHTNESS:
+		/* 1 data byte with brightness */
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_BRIGHTNESS", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tBrightness: 0x%02x\n", raw_data[1]);
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_CONTRAST:
+		/* 1 data byte with contrast */
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_CONTRAST", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tContrast: 0x%02x\n", raw_data[1]);
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_RESET:
+		/* 2 data bytes with reset duration in ms */
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_RESET", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tDuration: 0x%02x%02x (%dms)\n",
+				raw_data[2], raw_data[1], raw_data[2] << 8 | raw_data[1]);
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_LCD_CMD:
+		/* 63 data bytes with LCD commands */
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_LCD_CMD", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		/* TODO: format decoding */
+		break;
+	case REPORT_LCD_DATA:
+		/* 63 data bytes with LCD data */
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_LCD_DATA", report->id, raw_size-1);
+		/* TODO: format decoding */
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_LCD_CMD_DATA:
+		/* 63 data bytes with LCD commands and data */
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_LCD_CMD_DATA", report->id, raw_size-1);
+		/* TODO: format decoding */
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_EE_READ:
+		/* 3 data bytes with read area description */
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_EE_READ", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+				raw_data[2], raw_data[1]);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_EE_WRITE:
+		/* 3+1..20 data bytes with write area description */
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_EE_WRITE", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+				raw_data[2], raw_data[1]);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+		hid_debug_event(hdev, buff);
+		if (raw_data[3] == 0) {
+			snprintf(buff, BUFF_SZ, "\tNo data\n");
+		} else if (raw_data[3] + 4 <= raw_size) {
+			snprintf(buff, BUFF_SZ, "\tData: ");
+			hid_debug_event(hdev, buff);
+			dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+		} else {
+			snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+		}
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_ERASE_MEMORY:
+	case REPORT_BL_ERASE_MEMORY:
+		/* 3 data bytes with pointer inside erase block */
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_ERASE_MEMORY", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		switch (data->addr_sz) {
+		case 2:
+			snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x\n",
+					raw_data[2], raw_data[1]);
+			break;
+		case 3:
+			snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x%02x\n",
+					raw_data[3], raw_data[2], raw_data[1]);
+			break;
+		default:
+			snprintf(buff, BUFF_SZ, "\tNot supported\n");
+		}
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_READ_MEMORY:
+	case REPORT_BL_READ_MEMORY:
+		/* 4 data bytes with read area description */
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_READ_MEMORY", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		switch (data->addr_sz) {
+		case 2:
+			snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+					raw_data[2], raw_data[1]);
+			hid_debug_event(hdev, buff);
+			snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+			break;
+		case 3:
+			snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
+					raw_data[3], raw_data[2], raw_data[1]);
+			hid_debug_event(hdev, buff);
+			snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
+			break;
+		default:
+			snprintf(buff, BUFF_SZ, "\tNot supported\n");
+		}
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_WRITE_MEMORY:
+	case REPORT_BL_WRITE_MEMORY:
+		/* 4+1..32 data bytes with write area description */
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_WRITE_MEMORY", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		switch (data->addr_sz) {
+		case 2:
+			snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+					raw_data[2], raw_data[1]);
+			hid_debug_event(hdev, buff);
+			snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+			hid_debug_event(hdev, buff);
+			if (raw_data[3] == 0) {
+				snprintf(buff, BUFF_SZ, "\tNo data\n");
+			} else if (raw_data[3] + 4 <= raw_size) {
+				snprintf(buff, BUFF_SZ, "\tData: ");
+				hid_debug_event(hdev, buff);
+				dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+			} else {
+				snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+			}
+			break;
+		case 3:
+			snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
+					raw_data[3], raw_data[2], raw_data[1]);
+			hid_debug_event(hdev, buff);
+			snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
+			hid_debug_event(hdev, buff);
+			if (raw_data[4] == 0) {
+				snprintf(buff, BUFF_SZ, "\tNo data\n");
+			} else if (raw_data[4] + 5 <= raw_size) {
+				snprintf(buff, BUFF_SZ, "\tData: ");
+				hid_debug_event(hdev, buff);
+				dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]);
+			} else {
+				snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+			}
+			break;
+		default:
+			snprintf(buff, BUFF_SZ, "\tNot supported\n");
+		}
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_SPLASH_RESTART:
+		/* TODO */
+		break;
+	case REPORT_EXIT_KEYBOARD:
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_EXIT_KEYBOARD", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n",
+				raw_data[1] | (raw_data[2] << 8),
+				raw_data[2], raw_data[1]);
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_VERSION:
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_VERSION", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_DEVID:
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_DEVID", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_SPLASH_SIZE:
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_SPLASH_SIZE", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_HOOK_VERSION:
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_HOOK_VERSION", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_EXIT_FLASHER:
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"REPORT_EXIT_FLASHER", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n",
+				raw_data[1] | (raw_data[2] << 8),
+				raw_data[2], raw_data[1]);
+		hid_debug_event(hdev, buff);
+		break;
+	default:
+		snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+			"<unknown>", report->id, raw_size-1);
+		hid_debug_event(hdev, buff);
+		break;
+	}
+	wake_up_interruptible(&hdev->debug_wait);
+	kfree(buff);
+}
+
+static void picolcd_debug_raw_event(struct picolcd_data *data,
+		struct hid_device *hdev, struct hid_report *report,
+		u8 *raw_data, int size)
+{
+	char *buff;
+
+#define BUFF_SZ 256
+	/* Avoid unnecessary overhead if debugfs is disabled */
+	if (!hdev->debug_events)
+		return;
+
+	buff = kmalloc(BUFF_SZ, GFP_ATOMIC);
+	if (!buff)
+		return;
+
+	switch (report->id) {
+	case REPORT_ERROR_CODE:
+		/* 2 data bytes with affected report and error code */
+		snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+			"REPORT_ERROR_CODE", report->id, size-1);
+		hid_debug_event(hdev, buff);
+		if (raw_data[2] < ARRAY_SIZE(error_codes))
+			snprintf(buff, BUFF_SZ, "\tError code 0x%02x (%s) in reply to report 0x%02x\n",
+					raw_data[2], error_codes[raw_data[2]], raw_data[1]);
+		else
+			snprintf(buff, BUFF_SZ, "\tError code 0x%02x in reply to report 0x%02x\n",
+					raw_data[2], raw_data[1]);
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_KEY_STATE:
+		/* 2 data bytes with key state */
+		snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+			"REPORT_KEY_STATE", report->id, size-1);
+		hid_debug_event(hdev, buff);
+		if (raw_data[1] == 0)
+			snprintf(buff, BUFF_SZ, "\tNo key pressed\n");
+		else if (raw_data[2] == 0)
+			snprintf(buff, BUFF_SZ, "\tOne key pressed: 0x%02x (%d)\n",
+					raw_data[1], raw_data[1]);
+		else
+			snprintf(buff, BUFF_SZ, "\tTwo keys pressed: 0x%02x (%d), 0x%02x (%d)\n",
+					raw_data[1], raw_data[1], raw_data[2], raw_data[2]);
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_IR_DATA:
+		/* Up to 20 bytes of IR scancode data */
+		snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+			"REPORT_IR_DATA", report->id, size-1);
+		hid_debug_event(hdev, buff);
+		if (raw_data[1] == 0) {
+			snprintf(buff, BUFF_SZ, "\tUnexpectedly 0 data length\n");
+			hid_debug_event(hdev, buff);
+		} else if (raw_data[1] + 1 <= size) {
+			snprintf(buff, BUFF_SZ, "\tData length: %d\n\tIR Data: ",
+					raw_data[1]-1);
+			hid_debug_event(hdev, buff);
+			dump_buff_as_hex(buff, BUFF_SZ, raw_data+2, raw_data[1]-1);
+			hid_debug_event(hdev, buff);
+		} else {
+			snprintf(buff, BUFF_SZ, "\tOverflowing data length: %d\n",
+					raw_data[1]-1);
+			hid_debug_event(hdev, buff);
+		}
+		break;
+	case REPORT_EE_DATA:
+		/* Data buffer in response to REPORT_EE_READ or REPORT_EE_WRITE */
+		snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+			"REPORT_EE_DATA", report->id, size-1);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+				raw_data[2], raw_data[1]);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+		hid_debug_event(hdev, buff);
+		if (raw_data[3] == 0) {
+			snprintf(buff, BUFF_SZ, "\tNo data\n");
+			hid_debug_event(hdev, buff);
+		} else if (raw_data[3] + 4 <= size) {
+			snprintf(buff, BUFF_SZ, "\tData: ");
+			hid_debug_event(hdev, buff);
+			dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+			hid_debug_event(hdev, buff);
+		} else {
+			snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+			hid_debug_event(hdev, buff);
+		}
+		break;
+	case REPORT_MEMORY:
+		/* Data buffer in response to REPORT_READ_MEMORY or REPORT_WRITE_MEMORY */
+		snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+			"REPORT_MEMORY", report->id, size-1);
+		hid_debug_event(hdev, buff);
+		switch (data->addr_sz) {
+		case 2:
+			snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+					raw_data[2], raw_data[1]);
+			hid_debug_event(hdev, buff);
+			snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+			hid_debug_event(hdev, buff);
+			if (raw_data[3] == 0) {
+				snprintf(buff, BUFF_SZ, "\tNo data\n");
+			} else if (raw_data[3] + 4 <= size) {
+				snprintf(buff, BUFF_SZ, "\tData: ");
+				hid_debug_event(hdev, buff);
+				dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+			} else {
+				snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+			}
+			break;
+		case 3:
+			snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
+					raw_data[3], raw_data[2], raw_data[1]);
+			hid_debug_event(hdev, buff);
+			snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
+			hid_debug_event(hdev, buff);
+			if (raw_data[4] == 0) {
+				snprintf(buff, BUFF_SZ, "\tNo data\n");
+			} else if (raw_data[4] + 5 <= size) {
+				snprintf(buff, BUFF_SZ, "\tData: ");
+				hid_debug_event(hdev, buff);
+				dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]);
+			} else {
+				snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+			}
+			break;
+		default:
+			snprintf(buff, BUFF_SZ, "\tNot supported\n");
+		}
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_VERSION:
+		snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+			"REPORT_VERSION", report->id, size-1);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n",
+				raw_data[2], raw_data[1]);
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_BL_ERASE_MEMORY:
+		snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+			"REPORT_BL_ERASE_MEMORY", report->id, size-1);
+		hid_debug_event(hdev, buff);
+		/* TODO */
+		break;
+	case REPORT_BL_READ_MEMORY:
+		snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+			"REPORT_BL_READ_MEMORY", report->id, size-1);
+		hid_debug_event(hdev, buff);
+		/* TODO */
+		break;
+	case REPORT_BL_WRITE_MEMORY:
+		snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+			"REPORT_BL_WRITE_MEMORY", report->id, size-1);
+		hid_debug_event(hdev, buff);
+		/* TODO */
+		break;
+	case REPORT_DEVID:
+		snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+			"REPORT_DEVID", report->id, size-1);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tSerial: 0x%02x%02x%02x%02x\n",
+				raw_data[1], raw_data[2], raw_data[3], raw_data[4]);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tType: 0x%02x\n",
+				raw_data[5]);
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_SPLASH_SIZE:
+		snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+			"REPORT_SPLASH_SIZE", report->id, size-1);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tTotal splash space: %d\n",
+				(raw_data[2] << 8) | raw_data[1]);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tUsed splash space: %d\n",
+				(raw_data[4] << 8) | raw_data[3]);
+		hid_debug_event(hdev, buff);
+		break;
+	case REPORT_HOOK_VERSION:
+		snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+			"REPORT_HOOK_VERSION", report->id, size-1);
+		hid_debug_event(hdev, buff);
+		snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n",
+				raw_data[1], raw_data[2]);
+		hid_debug_event(hdev, buff);
+		break;
+	default:
+		snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+			"<unknown>", report->id, size-1);
+		hid_debug_event(hdev, buff);
+		break;
+	}
+	wake_up_interruptible(&hdev->debug_wait);
+	kfree(buff);
+}
+
+static void picolcd_init_devfs(struct picolcd_data *data,
+		struct hid_report *eeprom_r, struct hid_report *eeprom_w,
+		struct hid_report *flash_r, struct hid_report *flash_w,
+		struct hid_report *reset)
+{
+	struct hid_device *hdev = data->hdev;
+
+	mutex_init(&data->mutex_flash);
+
+	/* reset */
+	if (reset)
+		data->debug_reset = debugfs_create_file("reset", 0600,
+				hdev->debug_dir, data, &picolcd_debug_reset_fops);
+
+	/* eeprom */
+	if (eeprom_r || eeprom_w)
+		data->debug_eeprom = debugfs_create_file("eeprom",
+			(eeprom_w ? S_IWUSR : 0) | (eeprom_r ? S_IRUSR : 0),
+			hdev->debug_dir, data, &picolcd_debug_eeprom_fops);
+
+	/* flash */
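+	/* Derive the flash address width from the READ_MEMORY report layout;
+	 * report_count appears to be the address bytes plus one length byte. */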
+	if (flash_r && flash_r->maxfield == 1 && flash_r->field[0]->report_size == 8)
+		data->addr_sz = flash_r->field[0]->report_count - 1;
+	else
+		data->addr_sz = -1;
+	if (data->addr_sz == 2 || data->addr_sz == 3) {
+		data->debug_flash = debugfs_create_file("flash",
+			(flash_w ? S_IWUSR : 0) | (flash_r ? S_IRUSR : 0),
+			hdev->debug_dir, data, &picolcd_debug_flash_fops);
+	} else if (flash_r || flash_w)
+		dev_warn(&hdev->dev, "Unexpected FLASH access reports, "
+				"please submit rdesc for review\n");
+}
+
+static void picolcd_exit_devfs(struct picolcd_data *data)
+{
+	struct dentry *dent;
+
+	dent = data->debug_reset;
+	data->debug_reset = NULL;
+	if (dent)
+		debugfs_remove(dent);
+	dent = data->debug_eeprom;
+	data->debug_eeprom = NULL;
+	if (dent)
+		debugfs_remove(dent);
+	dent = data->debug_flash;
+	data->debug_flash = NULL;
+	if (dent)
+		debugfs_remove(dent);
+	mutex_destroy(&data->mutex_flash);
+}
+#else
+static inline void picolcd_debug_raw_event(struct picolcd_data *data,
+		struct hid_device *hdev, struct hid_report *report,
+		u8 *raw_data, int size)
+{
+}
+static inline void picolcd_init_devfs(struct picolcd_data *data,
+		struct hid_report *eeprom_r, struct hid_report *eeprom_w,
+		struct hid_report *flash_r, struct hid_report *flash_w,
+		struct hid_report *reset)
+{
+}
+static inline void picolcd_exit_devfs(struct picolcd_data *data)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * Handle raw report as sent by device
+ */
+static int picolcd_raw_event(struct hid_device *hdev,
+		struct hid_report *report, u8 *raw_data, int size)
+{
+	struct picolcd_data *data = hid_get_drvdata(hdev);
+	unsigned long flags;
+	int ret = 0;
+
+	if (!data)
+		return 1;
+
+	if (report->id == REPORT_KEY_STATE) {
+		if (data->input_keys)
+			ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
+	} else if (report->id == REPORT_IR_DATA) {
+		if (data->input_cir)
+			ret = picolcd_raw_cir(data, report, raw_data+1, size-1);
+	} else {
+		spin_lock_irqsave(&data->lock, flags);
+		/*
+		 * We let the caller of picolcd_send_and_wait() check if the
+		 * report we got is one of the expected ones or not.
+		 */
+		if (data->pending) {
+			memcpy(data->pending->raw_data, raw_data+1, size-1);
+			data->pending->raw_size  = size-1;
+			data->pending->in_report = report;
+			complete(&data->pending->ready);
+		}
+		spin_unlock_irqrestore(&data->lock, flags);
+	}
+
+	picolcd_debug_raw_event(data, hdev, report, raw_data, size);
+	return 1;
+}
+
+#ifdef CONFIG_PM
+static int picolcd_suspend(struct hid_device *hdev, pm_message_t message)
+{
+	if (message.event & PM_EVENT_AUTO)
+		return 0;
+
+	picolcd_suspend_backlight(hid_get_drvdata(hdev));
+	dbg_hid(PICOLCD_NAME " device ready for suspend\n");
+	return 0;
+}
+
+static int picolcd_resume(struct hid_device *hdev)
+{
+	int ret;
+	ret = picolcd_resume_backlight(hid_get_drvdata(hdev));
+	if (ret)
+		dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", ret);
+	return 0;
+}
+
+static int picolcd_reset_resume(struct hid_device *hdev)
+{
+	int ret;
+	ret = picolcd_reset(hdev);
+	if (ret)
+		dbg_hid(PICOLCD_NAME " resetting our device failed: %d\n", ret);
+	ret = picolcd_fb_reset(hid_get_drvdata(hdev), 0);
+	if (ret)
+		dbg_hid(PICOLCD_NAME " restoring framebuffer content failed: %d\n", ret);
+	ret = picolcd_resume_lcd(hid_get_drvdata(hdev));
+	if (ret)
+		dbg_hid(PICOLCD_NAME " restoring lcd failed: %d\n", ret);
+	ret = picolcd_resume_backlight(hid_get_drvdata(hdev));
+	if (ret)
+		dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", ret);
+	picolcd_leds_set(hid_get_drvdata(hdev));
+	return 0;
+}
+#endif
+
+/* initialize keypad input device */
+static int picolcd_init_keys(struct picolcd_data *data,
+		struct hid_report *report)
+{
+	struct hid_device *hdev = data->hdev;
+	struct input_dev *idev;
+	int error, i;
+
+	if (!report)
+		return -ENODEV;
+	if (report->maxfield != 1 || report->field[0]->report_count != 2 ||
+			report->field[0]->report_size != 8) {
+		dev_err(&hdev->dev, "unsupported KEY_STATE report\n");
+		return -EINVAL;
+	}
+
+	idev = input_allocate_device();
+	if (idev == NULL) {
+		dev_err(&hdev->dev, "failed to allocate input device\n");
+		return -ENOMEM;
+	}
+	input_set_drvdata(idev, hdev);
+	memcpy(data->keycode, def_keymap, sizeof(def_keymap));
+	idev->name = hdev->name;
+	idev->phys = hdev->phys;
+	idev->uniq = hdev->uniq;
+	idev->id.bustype = hdev->bus;
+	idev->id.vendor  = hdev->vendor;
+	idev->id.product = hdev->product;
+	idev->id.version = hdev->version;
+	idev->dev.parent = hdev->dev.parent;
+	idev->keycode     = &data->keycode;
+	idev->keycodemax  = PICOLCD_KEYS;
+	idev->keycodesize = sizeof(data->keycode[0]);
+	input_set_capability(idev, EV_MSC, MSC_SCAN);
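+	/* let the input core auto-repeat held keypad keys */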
+	set_bit(EV_REP, idev->evbit);
+	for (i = 0; i < PICOLCD_KEYS; i++)
+		input_set_capability(idev, EV_KEY, data->keycode[i]);
+	error = input_register_device(idev);
+	if (error) {
+		dev_err(&hdev->dev, "error registering the input device\n");
+		input_free_device(idev);
+		return error;
+	}
+	data->input_keys = idev;
+	return 0;
+}
+
+static void picolcd_exit_keys(struct picolcd_data *data)
+{
+	struct input_dev *idev = data->input_keys;
+
+	data->input_keys = NULL;
+	if (idev)
+		input_unregister_device(idev);
+}
+
+/* initialize CIR input device */
+static inline int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report)
+{
+	/* support not implemented yet */
+	return 0;
+}
+
+static inline void picolcd_exit_cir(struct picolcd_data *data)
+{
+}
+
+static int picolcd_probe_lcd(struct hid_device *hdev, struct picolcd_data *data)
+{
+	int error;
+
+	error = picolcd_check_version(hdev);
+	if (error)
+		return error;
+
+	if (data->version[0] != 0 || data->version[1] != 3)
+		dev_info(&hdev->dev, "Device with untested firmware revision, "
+				"please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
+				dev_name(&hdev->dev));
+
+	/* Setup keypad input device */
+	error = picolcd_init_keys(data, picolcd_in_report(REPORT_KEY_STATE, hdev));
+	if (error)
+		goto err;
+
+	/* Setup CIR input device */
+	error = picolcd_init_cir(data, picolcd_in_report(REPORT_IR_DATA, hdev));
+	if (error)
+		goto err;
+
+	/* Set up the framebuffer device */
+	error = picolcd_init_framebuffer(data);
+	if (error)
+		goto err;
+
+	/* Setup lcd class device */
+	error = picolcd_init_lcd(data, picolcd_out_report(REPORT_CONTRAST, hdev));
+	if (error)
+		goto err;
+
+	/* Setup backlight class device */
+	error = picolcd_init_backlight(data, picolcd_out_report(REPORT_BRIGHTNESS, hdev));
+	if (error)
+		goto err;
+
+	/* Setup the LED class devices */
+	error = picolcd_init_leds(data, picolcd_out_report(REPORT_LED_STATE, hdev));
+	if (error)
+		goto err;
+
+	picolcd_init_devfs(data, picolcd_out_report(REPORT_EE_READ, hdev),
+			picolcd_out_report(REPORT_EE_WRITE, hdev),
+			picolcd_out_report(REPORT_READ_MEMORY, hdev),
+			picolcd_out_report(REPORT_WRITE_MEMORY, hdev),
+			picolcd_out_report(REPORT_RESET, hdev));
+	return 0;
+err:
+	picolcd_exit_leds(data);
+	picolcd_exit_backlight(data);
+	picolcd_exit_lcd(data);
+	picolcd_exit_framebuffer(data);
+	picolcd_exit_cir(data);
+	picolcd_exit_keys(data);
+	return error;
+}
+
+static int picolcd_probe_bootloader(struct hid_device *hdev, struct picolcd_data *data)
+{
+	int error;
+
+	error = picolcd_check_version(hdev);
+	if (error)
+		return error;
+
+	if (data->version[0] != 1 || data->version[1] != 0)
+		dev_info(&hdev->dev, "Device with untested bootloader revision, "
+				"please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
+				dev_name(&hdev->dev));
+
+	picolcd_init_devfs(data, NULL, NULL,
+			picolcd_out_report(REPORT_BL_READ_MEMORY, hdev),
+			picolcd_out_report(REPORT_BL_WRITE_MEMORY, hdev), NULL);
+	return 0;
+}
+
+static int picolcd_probe(struct hid_device *hdev,
+		     const struct hid_device_id *id)
+{
+	struct picolcd_data *data;
+	int error = -ENOMEM;
+
+	dbg_hid(PICOLCD_NAME " hardware probe...\n");
+
+	/*
+	 * Let's allocate the picolcd data structure, set some reasonable
+	 * defaults, and associate it with the device
+	 */
+	data = kzalloc(sizeof(struct picolcd_data), GFP_KERNEL);
+	if (data == NULL) {
+		dev_err(&hdev->dev, "can't allocate space for Minibox PicoLCD device data\n");
+		error = -ENOMEM;
+		goto err_no_cleanup;
+	}
+
+	spin_lock_init(&data->lock);
+	mutex_init(&data->mutex);
+	data->hdev = hdev;
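+	/* default operation-mode switch delay, in milliseconds */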
+	data->opmode_delay = 5000;
+	if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER)
+		data->status |= PICOLCD_BOOTLOADER;
+	hid_set_drvdata(hdev, data);
+
+	/* Parse the device reports and start it up */
+	error = hid_parse(hdev);
+	if (error) {
+		dev_err(&hdev->dev, "device report parse failed\n");
+		goto err_cleanup_data;
+	}
+
+	/* We don't use hidinput but hid_hw_start() fails if nothing is
+	 * claimed. So spoof claimed input. */
+	hdev->claimed = HID_CLAIMED_INPUT;
+	error = hid_hw_start(hdev, 0);
+	hdev->claimed = 0;
+	if (error) {
+		dev_err(&hdev->dev, "hardware start failed\n");
+		goto err_cleanup_data;
+	}
+
+	error = hdev->ll_driver->open(hdev);
+	if (error) {
+		dev_err(&hdev->dev, "failed to open input interrupt pipe for key and IR events\n");
+		goto err_cleanup_hid_hw;
+	}
+
+	error = device_create_file(&hdev->dev, &dev_attr_operation_mode_delay);
+	if (error) {
+		dev_err(&hdev->dev, "failed to create sysfs attributes\n");
+		goto err_cleanup_hid_ll;
+	}
+
+	error = device_create_file(&hdev->dev, &dev_attr_operation_mode);
+	if (error) {
+		dev_err(&hdev->dev, "failed to create sysfs attributes\n");
+		goto err_cleanup_sysfs1;
+	}
+
+	if (data->status & PICOLCD_BOOTLOADER)
+		error = picolcd_probe_bootloader(hdev, data);
+	else
+		error = picolcd_probe_lcd(hdev, data);
+	if (error)
+		goto err_cleanup_sysfs2;
+
+	dbg_hid(PICOLCD_NAME " activated and initialized\n");
+	return 0;
+
+err_cleanup_sysfs2:
+	device_remove_file(&hdev->dev, &dev_attr_operation_mode);
+err_cleanup_sysfs1:
+	device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
+err_cleanup_hid_ll:
+	hdev->ll_driver->close(hdev);
+err_cleanup_hid_hw:
+	hid_hw_stop(hdev);
+err_cleanup_data:
+	kfree(data);
+err_no_cleanup:
+	hid_set_drvdata(hdev, NULL);
+
+	return error;
+}
+
+static void picolcd_remove(struct hid_device *hdev)
+{
+	struct picolcd_data *data = hid_get_drvdata(hdev);
+	unsigned long flags;
+
+	dbg_hid(PICOLCD_NAME " hardware remove...\n");
+	spin_lock_irqsave(&data->lock, flags);
+	data->status |= PICOLCD_FAILED;
+	spin_unlock_irqrestore(&data->lock, flags);
+
+	picolcd_exit_devfs(data);
+	device_remove_file(&hdev->dev, &dev_attr_operation_mode);
+	device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
+	hdev->ll_driver->close(hdev);
+	hid_hw_stop(hdev);
+	hid_set_drvdata(hdev, NULL);
+
+	/* Shortcut potential pending reply that will never arrive */
+	spin_lock_irqsave(&data->lock, flags);
+	if (data->pending)
+		complete(&data->pending->ready);
+	spin_unlock_irqrestore(&data->lock, flags);
+
+	/* Cleanup LED */
+	picolcd_exit_leds(data);
+	/* Clean up the framebuffer */
+	picolcd_exit_backlight(data);
+	picolcd_exit_lcd(data);
+	picolcd_exit_framebuffer(data);
+	/* Cleanup input */
+	picolcd_exit_cir(data);
+	picolcd_exit_keys(data);
+
+	mutex_destroy(&data->mutex);
+	/* Finally, clean up the picolcd data itself */
+	kfree(data);
+}
+
+static const struct hid_device_id picolcd_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, picolcd_devices);
+
+static struct hid_driver picolcd_driver = {
+	.name =          "hid-picolcd",
+	.id_table =      picolcd_devices,
+	.probe =         picolcd_probe,
+	.remove =        picolcd_remove,
+	.raw_event =     picolcd_raw_event,
+#ifdef CONFIG_PM
+	.suspend =       picolcd_suspend,
+	.resume =        picolcd_resume,
+	.reset_resume =  picolcd_reset_resume,
+#endif
+};
+
+static int __init picolcd_init(void)
+{
+	return hid_register_driver(&picolcd_driver);
+}
+
+static void __exit picolcd_exit(void)
+{
+	hid_unregister_driver(&picolcd_driver);
+}
+
+module_init(picolcd_init);
+module_exit(picolcd_exit);
+MODULE_DESCRIPTION("Minibox graphics PicoLCD Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
new file mode 100644
index 0000000..845f428
--- /dev/null
+++ b/drivers/hid/hid-prodikeys.c
@@ -0,0 +1,910 @@
+/*
+ *  HID driver for the Prodikeys PC-MIDI Keyboard
+ *  providing midi & extra multimedia keys functionality
+ *
+ *  Copyright (c) 2009 Don Prince <dhprince.devel@yahoo.co.uk>
+ *
+ *  Controls for Octave Shift Up/Down, Channel, and
+ *  Sustain Duration available via sysfs.
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/mutex.h>
+#include <linux/hid.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/rawmidi.h>
+#include "usbhid/usbhid.h"
+#include "hid-ids.h"
+
+
+#define pk_debug(format, arg...) \
+	pr_debug("hid-prodikeys: " format "\n" , ## arg)
+#define pk_error(format, arg...) \
+	pr_err("hid-prodikeys: " format "\n" , ## arg)
+
+struct pcmidi_snd;
+
+struct pk_device {
+	unsigned long		quirks;
+
+	struct hid_device	*hdev;
+	struct pcmidi_snd	*pm; /* pcmidi device context */
+};
+
+struct pcmidi_sustain {
+	unsigned long		in_use;
+	struct pcmidi_snd	*pm;
+	struct timer_list	timer;
+	unsigned char		status;
+	unsigned char		note;
+	unsigned char		velocity;
+};
+
+#define PCMIDI_SUSTAINED_MAX	32
+struct pcmidi_snd {
+	struct pk_device		*pk;
+	unsigned short			ifnum;
+	struct hid_report		*pcmidi_report6;
+	struct input_dev		*input_ep82;
+	unsigned short			midi_mode;
+	unsigned short			midi_sustain_mode;
+	unsigned short			midi_sustain;
+	unsigned short			midi_channel;
+	short				midi_octave;
+	struct pcmidi_sustain		sustained_notes[PCMIDI_SUSTAINED_MAX];
+	unsigned short			fn_state;
+	unsigned short			last_key[24];
+	spinlock_t			rawmidi_in_lock;
+	struct snd_card			*card;
+	struct snd_rawmidi		*rwmidi;
+	struct snd_rawmidi_substream	*in_substream;
+	struct snd_rawmidi_substream	*out_substream;
+	unsigned long			in_triggered;
+	unsigned long			out_active;
+};
+
+#define PK_QUIRK_NOGET	0x00010000
+#define PCMIDI_MIDDLE_C 60
+#define PCMIDI_CHANNEL_MIN 0
+#define PCMIDI_CHANNEL_MAX 15
+#define PCMIDI_OCTAVE_MIN (-2)
+#define PCMIDI_OCTAVE_MAX 2
+#define PCMIDI_SUSTAIN_MIN 0
+#define PCMIDI_SUSTAIN_MAX 5000
+
+static const char shortname[] = "PC-MIDI";
+static const char longname[] = "Prodikeys PC-MIDI Keyboard";
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
+static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
+
+module_param_array(index, int, NULL, 0444);
+module_param_array(id, charp, NULL, 0444);
+module_param_array(enable, bool, NULL, 0444);
+MODULE_PARM_DESC(index, "Index value for the PC-MIDI virtual audio driver");
+MODULE_PARM_DESC(id, "ID string for the PC-MIDI virtual audio driver");
+MODULE_PARM_DESC(enable, "Enable for the PC-MIDI virtual audio driver");
+
+
+/* Output routine for the sysfs channel file */
+static ssize_t show_channel(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+	dbg_hid("pcmidi sysfs read channel=%u\n", pk->pm->midi_channel);
+
+	return sprintf(buf, "%u (min:%u, max:%u)\n", pk->pm->midi_channel,
+		PCMIDI_CHANNEL_MIN, PCMIDI_CHANNEL_MAX);
+}
+
+/* Input routine for the sysfs channel file */
+static ssize_t store_channel(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+	unsigned channel = 0;
+
+	if (sscanf(buf, "%u", &channel) > 0 && channel <= PCMIDI_CHANNEL_MAX) {
+		dbg_hid("pcmidi sysfs write channel=%u\n", channel);
+		pk->pm->midi_channel = channel;
+		return strlen(buf);
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(channel, S_IRUGO | S_IWUGO, show_channel,
+		store_channel);
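+/*
+ * Example (hypothetical path, the exact location depends on where the HID
+ * device sits in sysfs):  echo 2 > .../channel  selects MIDI channel 2.
+ */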
+
+static struct device_attribute *sysfs_device_attr_channel =
+		&dev_attr_channel;
+
+/* Output routine for the sysfs sustain file */
+static ssize_t show_sustain(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+	dbg_hid("pcmidi sysfs read sustain=%u\n", pk->pm->midi_sustain);
+
+	return sprintf(buf, "%u (off:%u, max:%u (ms))\n", pk->pm->midi_sustain,
+		PCMIDI_SUSTAIN_MIN, PCMIDI_SUSTAIN_MAX);
+}
+
+/* Input routine for the sysfs sustain file */
+static ssize_t store_sustain(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+	unsigned sustain = 0;
+
+	if (sscanf(buf, "%u", &sustain) > 0 && sustain <= PCMIDI_SUSTAIN_MAX) {
+		dbg_hid("pcmidi sysfs write sustain=%u\n", sustain);
+		pk->pm->midi_sustain = sustain;
+		pk->pm->midi_sustain_mode =
+			(0 == sustain || !pk->pm->midi_mode) ? 0 : 1;
+		return strlen(buf);
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(sustain, S_IRUGO | S_IWUGO, show_sustain,
+		store_sustain);
+
+static struct device_attribute *sysfs_device_attr_sustain =
+		&dev_attr_sustain;
+
+/* Output routine for the sysfs octave file */
+static ssize_t show_octave(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+	dbg_hid("pcmidi sysfs read octave=%d\n", pk->pm->midi_octave);
+
+	return sprintf(buf, "%d (min:%d, max:%d)\n", pk->pm->midi_octave,
+		PCMIDI_OCTAVE_MIN, PCMIDI_OCTAVE_MAX);
+}
+
+/* Input routine for the sysfs octave file */
+static ssize_t store_octave(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+	int octave = 0;
+
+	if (sscanf(buf, "%d", &octave) > 0 &&
+		octave >= PCMIDI_OCTAVE_MIN && octave <= PCMIDI_OCTAVE_MAX) {
+		dbg_hid("pcmidi sysfs write octave=%d\n", octave);
+		pk->pm->midi_octave = octave;
+		return strlen(buf);
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(octave, S_IRUGO | S_IWUGO, show_octave,
+		store_octave);
+
+static struct device_attribute *sysfs_device_attr_octave =
+		&dev_attr_octave;
+
+
+static void pcmidi_send_note(struct pcmidi_snd *pm,
+	unsigned char status, unsigned char note, unsigned char velocity)
+{
+	unsigned long flags;
+	unsigned char buffer[3];
+
+	buffer[0] = status;
+	buffer[1] = note;
+	buffer[2] = velocity;
+
+	spin_lock_irqsave(&pm->rawmidi_in_lock, flags);
+
+	if (!pm->in_substream)
+		goto drop_note;
+	if (!test_bit(pm->in_substream->number, &pm->in_triggered))
+		goto drop_note;
+
+	snd_rawmidi_receive(pm->in_substream, buffer, 3);
+
+drop_note:
+	spin_unlock_irqrestore(&pm->rawmidi_in_lock, flags);
+
+	return;
+}
+
+void pcmidi_sustained_note_release(unsigned long data)
+{
+	struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data;
+
+	pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity);
+	pms->in_use = 0;
+}
+
+void init_sustain_timers(struct pcmidi_snd *pm)
+{
+	struct pcmidi_sustain *pms;
+	unsigned i;
+
+	for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
+		pms = &pm->sustained_notes[i];
+		pms->in_use = 0;
+		pms->pm = pm;
+		setup_timer(&pms->timer, pcmidi_sustained_note_release,
+			(unsigned long)pms);
+	}
+}
+
+void stop_sustain_timers(struct pcmidi_snd *pm)
+{
+	struct pcmidi_sustain *pms;
+	unsigned i;
+
+	for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
+		pms = &pm->sustained_notes[i];
+		pms->in_use = 1;
+		del_timer_sync(&pms->timer);
+	}
+}
+
+static int pcmidi_get_output_report(struct pcmidi_snd *pm)
+{
+	struct hid_device *hdev = pm->pk->hdev;
+	struct hid_report *report;
+
+	list_for_each_entry(report,
+		&hdev->report_enum[HID_OUTPUT_REPORT].report_list, list) {
+		if (report->id != 6)
+			continue;
+
+		if (report->maxfield < 1) {
+			dev_err(&hdev->dev, "output report is empty\n");
+			break;
+		}
+		if (report->field[0]->report_count != 2) {
+			dev_err(&hdev->dev, "field count too low\n");
+			break;
+		}
+		pm->pcmidi_report6 = report;
+		return 0;
+	}
+	/* should never get here */
+	return -ENODEV;
+}
+
+static void pcmidi_submit_output_report(struct pcmidi_snd *pm, int state)
+{
+	struct hid_device *hdev = pm->pk->hdev;
+	struct hid_report *report = pm->pcmidi_report6;
+	report->field[0]->value[0] = 0x01;
+	report->field[0]->value[1] = state;
+
+	usbhid_submit_report(hdev, report, USB_DIR_OUT);
+}
+
+static int pcmidi_handle_report1(struct pcmidi_snd *pm, u8 *data)
+{
+	u32 bit_mask;
+
+	bit_mask = data[1];
+	bit_mask = (bit_mask << 8) | data[2];
+	bit_mask = (bit_mask << 8) | data[3];
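+	/* data[1..3] form a 24-bit key bitmask (data[1] is the most significant byte) */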
+
+	dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
+
+	/*KEY_MAIL or octave down*/
+	if (pm->midi_mode && bit_mask == 0x004000) {
+		/* octave down */
+		pm->midi_octave--;
+		if (pm->midi_octave < -2)
+			pm->midi_octave = -2;
+		dbg_hid("pcmidi mode: %d octave: %d\n",
+			pm->midi_mode, pm->midi_octave);
+		return 1;
+	}
+	/*KEY_WWW or sustain*/
+	else if (pm->midi_mode && bit_mask == 0x000004) {
+		/* sustain on/off*/
+		pm->midi_sustain_mode ^= 0x1;
+		return 1;
+	}
+
+	return 0; /* continue key processing */
+}
+
+static int pcmidi_handle_report3(struct pcmidi_snd *pm, u8 *data, int size)
+{
+	struct pcmidi_sustain *pms;
+	unsigned i, j;
+	unsigned char status, note, velocity;
+
+	unsigned num_notes = (size-1)/2;
+	for (j = 0; j < num_notes; j++)	{
+		note = data[j*2+1];
+		velocity = data[j*2+2];
+
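+		/*
+		 * Raw codes below 0x81 are note-on events, the rest note-off;
+		 * both are rebased to MIDI note numbers relative to middle C,
+		 * with the current octave shift applied.
+		 */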
+		if (note < 0x81) { /* note on */
+			status = 128 + 16 + pm->midi_channel; /* 1001nnnn */
+			note = note - 0x54 + PCMIDI_MIDDLE_C +
+				(pm->midi_octave * 12);
+			if (0 == velocity)
+				velocity = 1; /* force note on */
+		} else { /* note off */
+			status = 128 + pm->midi_channel; /* 1000nnnn */
+			note = note - 0x94 + PCMIDI_MIDDLE_C +
+				(pm->midi_octave*12);
+
+			if (pm->midi_sustain_mode) {
+				for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
+					pms = &pm->sustained_notes[i];
+					if (!pms->in_use) {
+						pms->status = status;
+						pms->note = note;
+						pms->velocity = velocity;
+						pms->in_use = 1;
+
+						mod_timer(&pms->timer,
+							jiffies +
+					msecs_to_jiffies(pm->midi_sustain));
+						return 1;
+					}
+				}
+			}
+		}
+		pcmidi_send_note(pm, status, note, velocity);
+	}
+
+	return 1;
+}
+
+static int pcmidi_handle_report4(struct pcmidi_snd *pm, u8 *data)
+{
+	unsigned	key;
+	u32		bit_mask;
+	u32		bit_index;
+
+	bit_mask = data[1];
+	bit_mask = (bit_mask << 8) | data[2];
+	bit_mask = (bit_mask << 8) | data[3];
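+	/* data[1..3] again form a 24-bit bitmask, one bit per multimedia/midi key */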
+
+	/* break keys */
+	for (bit_index = 0; bit_index < 24; bit_index++) {
+		key = pm->last_key[bit_index];
+		if (!((0x01 << bit_index) & bit_mask)) {
+			input_event(pm->input_ep82, EV_KEY,
+				pm->last_key[bit_index], 0);
+			pm->last_key[bit_index] = 0;
+		}
+	}
+
+	/* make keys */
+	for (bit_index = 0; bit_index < 24; bit_index++) {
+		key = 0;
+		switch ((0x01 << bit_index) & bit_mask) {
+		case 0x000010: /* Fn lock*/
+			pm->fn_state ^= 0x000010;
+			if (pm->fn_state)
+				pcmidi_submit_output_report(pm, 0xc5);
+			else
+				pcmidi_submit_output_report(pm, 0xc6);
+			continue;
+		case 0x020000: /* midi launcher..send a key (qwerty) or not? */
+			pcmidi_submit_output_report(pm, 0xc1);
+			pm->midi_mode ^= 0x01;
+
+			dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
+			continue;
+		case 0x100000: /* KEY_MESSENGER or octave up */
+			dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
+			if (pm->midi_mode) {
+				pm->midi_octave++;
+				if (pm->midi_octave > 2)
+					pm->midi_octave = 2;
+				dbg_hid("pcmidi mode: %d octave: %d\n",
+					pm->midi_mode, pm->midi_octave);
+				continue;
+			} else
+				key = KEY_MESSENGER;
+			break;
+		case 0x400000:
+			key = KEY_CALENDAR;
+			break;
+		case 0x080000:
+			key = KEY_ADDRESSBOOK;
+			break;
+		case 0x040000:
+			key = KEY_DOCUMENTS;
+			break;
+		case 0x800000:
+			key = KEY_WORDPROCESSOR;
+			break;
+		case 0x200000:
+			key = KEY_SPREADSHEET;
+			break;
+		case 0x010000:
+			key = KEY_COFFEE;
+			break;
+		case 0x000100:
+			key = KEY_HELP;
+			break;
+		case 0x000200:
+			key = KEY_SEND;
+			break;
+		case 0x000400:
+			key = KEY_REPLY;
+			break;
+		case 0x000800:
+			key = KEY_FORWARDMAIL;
+			break;
+		case 0x001000:
+			key = KEY_NEW;
+			break;
+		case 0x002000:
+			key = KEY_OPEN;
+			break;
+		case 0x004000:
+			key = KEY_CLOSE;
+			break;
+		case 0x008000:
+			key = KEY_SAVE;
+			break;
+		case 0x000001:
+			key = KEY_UNDO;
+			break;
+		case 0x000002:
+			key = KEY_REDO;
+			break;
+		case 0x000004:
+			key = KEY_SPELLCHECK;
+			break;
+		case 0x000008:
+			key = KEY_PRINT;
+			break;
+		}
+		if (key) {
+			input_event(pm->input_ep82, EV_KEY, key, 1);
+			pm->last_key[bit_index] = key;
+		}
+	}
+
+	return 1;
+}
+
+int pcmidi_handle_report(
+	struct pcmidi_snd *pm, unsigned report_id, u8 *data, int size)
+{
+	int ret = 0;
+
+	switch (report_id) {
+	case 0x01: /* midi keys (qwerty)*/
+		ret = pcmidi_handle_report1(pm, data);
+		break;
+	case 0x03: /* midi keyboard (musical)*/
+		ret = pcmidi_handle_report3(pm, data, size);
+		break;
+	case 0x04: /* multimedia/midi keys (qwerty)*/
+		ret = pcmidi_handle_report4(pm, data);
+		break;
+	}
+	return ret;
+}
+
+void pcmidi_setup_extra_keys(struct pcmidi_snd *pm, struct input_dev *input)
+{
+	/* reassigned functionality for N/A keys
+		MY PICTURES =>	KEY_WORDPROCESSOR
+		MY MUSIC=>	KEY_SPREADSHEET
+	*/
+	unsigned int keys[] = {
+		KEY_FN,
+		KEY_MESSENGER, KEY_CALENDAR,
+		KEY_ADDRESSBOOK, KEY_DOCUMENTS,
+		KEY_WORDPROCESSOR,
+		KEY_SPREADSHEET,
+		KEY_COFFEE,
+		KEY_HELP, KEY_SEND,
+		KEY_REPLY, KEY_FORWARDMAIL,
+		KEY_NEW, KEY_OPEN,
+		KEY_CLOSE, KEY_SAVE,
+		KEY_UNDO, KEY_REDO,
+		KEY_SPELLCHECK,	KEY_PRINT,
+		0
+	};
+
+	unsigned int *pkeys = &keys[0];
+	unsigned short i;
+
+	if (pm->ifnum != 1)  /* only set up ONCE for interface 1 */
+		return;
+
+	pm->input_ep82 = input;
+
+	for (i = 0; i < 24; i++)
+		pm->last_key[i] = 0;
+
+	while (*pkeys != 0) {
+		set_bit(*pkeys, pm->input_ep82->keybit);
+		++pkeys;
+	}
+}
+
+static int pcmidi_set_operational(struct pcmidi_snd *pm)
+{
+	if (pm->ifnum != 1)
+		return 0; /* only set up ONCE for interface 1 */
+
+	pcmidi_get_output_report(pm);
+	pcmidi_submit_output_report(pm, 0xc1);
+	return 0;
+}
+
+static int pcmidi_snd_free(struct snd_device *dev)
+{
+	return 0;
+}
+
+static int pcmidi_in_open(struct snd_rawmidi_substream *substream)
+{
+	struct pcmidi_snd *pm = substream->rmidi->private_data;
+
+	dbg_hid("pcmidi in open\n");
+	pm->in_substream = substream;
+	return 0;
+}
+
+static int pcmidi_in_close(struct snd_rawmidi_substream *substream)
+{
+	dbg_hid("pcmidi in close\n");
+	return 0;
+}
+
+static void pcmidi_in_trigger(struct snd_rawmidi_substream *substream, int up)
+{
+	struct pcmidi_snd *pm = substream->rmidi->private_data;
+
+	dbg_hid("pcmidi in trigger %d\n", up);
+
+	pm->in_triggered = up;
+}
+
+static struct snd_rawmidi_ops pcmidi_in_ops = {
+	.open = pcmidi_in_open,
+	.close = pcmidi_in_close,
+	.trigger = pcmidi_in_trigger
+};
+
+int pcmidi_snd_initialise(struct pcmidi_snd *pm)
+{
+	static int dev;
+	struct snd_card *card;
+	struct snd_rawmidi *rwmidi;
+	int err;
+
+	static struct snd_device_ops ops = {
+		.dev_free = pcmidi_snd_free,
+	};
+
+	if (pm->ifnum != 1)
+		return 0; /* only set up midi device ONCE for interface 1 */
+
+	if (dev >= SNDRV_CARDS)
+		return -ENODEV;
+
+	if (!enable[dev]) {
+		dev++;
+		return -ENOENT;
+	}
+
+	/* Setup sound card */
+
+	err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
+	if (err < 0) {
+		pk_error("failed to create pc-midi sound card\n");
+		err = -ENOMEM;
+		goto fail;
+	}
+	pm->card = card;
+
+	/* Setup sound device */
+	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, pm, &ops);
+	if (err < 0) {
+		pk_error("failed to create pc-midi sound device: error %d\n",
+			err);
+		goto fail;
+	}
+
+	strncpy(card->driver, shortname, sizeof(card->driver));
+	strncpy(card->shortname, shortname, sizeof(card->shortname));
+	strncpy(card->longname, longname, sizeof(card->longname));
+
+	/* Set up rawmidi */
+	err = snd_rawmidi_new(card, card->shortname, 0,
+			      0, 1, &rwmidi);
+	if (err < 0) {
+		pk_error("failed to create pc-midi rawmidi device: error %d\n",
+			err);
+		goto fail;
+	}
+	pm->rwmidi = rwmidi;
+	strncpy(rwmidi->name, card->shortname, sizeof(rwmidi->name));
+	rwmidi->info_flags = SNDRV_RAWMIDI_INFO_INPUT;
+	rwmidi->private_data = pm;
+
+	snd_rawmidi_set_ops(rwmidi, SNDRV_RAWMIDI_STREAM_INPUT,
+		&pcmidi_in_ops);
+
+	snd_card_set_dev(card, &pm->pk->hdev->dev);
+
+	/* create sysfs variables */
+	err = device_create_file(&pm->pk->hdev->dev,
+				 sysfs_device_attr_channel);
+	if (err < 0) {
+		pk_error("failed to create sysfs attribute channel: error %d\n",
+			err);
+		goto fail;
+	}
+
+	err = device_create_file(&pm->pk->hdev->dev,
+				sysfs_device_attr_sustain);
+	if (err < 0) {
+		pk_error("failed to create sysfs attribute sustain: error %d\n",
+			err);
+		goto fail_attr_sustain;
+	}
+
+	err = device_create_file(&pm->pk->hdev->dev,
+			 sysfs_device_attr_octave);
+	if (err < 0) {
+		pk_error("failed to create sysfs attribute octave: error %d\n",
+			err);
+		goto fail_attr_octave;
+	}
+
+	spin_lock_init(&pm->rawmidi_in_lock);
+
+	init_sustain_timers(pm);
+	pcmidi_set_operational(pm);
+
+	/* register it */
+	err = snd_card_register(card);
+	if (err < 0) {
+		pk_error("failed to register pc-midi sound card: error %d\n",
+			 err);
+		goto fail_register;
+	}
+
+	dbg_hid("pcmidi_snd_initialise finished ok\n");
+	return 0;
+
+fail_register:
+	stop_sustain_timers(pm);
+	device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_octave);
+fail_attr_octave:
+	device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_sustain);
+fail_attr_sustain:
+	device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_channel);
+fail:
+	if (pm->card) {
+		snd_card_free(pm->card);
+		pm->card = NULL;
+	}
+	return err;
+}
+
+int pcmidi_snd_terminate(struct pcmidi_snd *pm)
+{
+	if (pm->card) {
+		stop_sustain_timers(pm);
+
+		device_remove_file(&pm->pk->hdev->dev,
+			sysfs_device_attr_channel);
+		device_remove_file(&pm->pk->hdev->dev,
+			sysfs_device_attr_sustain);
+		device_remove_file(&pm->pk->hdev->dev,
+			sysfs_device_attr_octave);
+
+		snd_card_disconnect(pm->card);
+		snd_card_free_when_closed(pm->card);
+	}
+
+	return 0;
+}
+
+/*
+ * The PC-MIDI report descriptor declares a wrong report count for report id 4.
+ */
+static void pk_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+		unsigned int rsize)
+{
+	if (rsize == 178 &&
+	      rdesc[111] == 0x06 && rdesc[112] == 0x00 &&
+	      rdesc[113] == 0xff) {
+		dev_info(&hdev->dev, "fixing up pc-midi keyboard report "
+			"descriptor\n");
+
+		rdesc[144] = 0x18; /* report 4: was 0x10 report count */
+	}
+}
+
+static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+	struct pcmidi_snd *pm;
+
+	pm = pk->pm;
+
+	if (HID_UP_MSVENDOR == (usage->hid & HID_USAGE_PAGE) &&
+		1 == pm->ifnum) {
+		pcmidi_setup_extra_keys(pm, hi->input);
+		return 0;
+	}
+
+	return 0;
+}
+
+
+static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
+	u8 *data, int size)
+{
+	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+	int ret = 0;
+
+	if (1 == pk->pm->ifnum) {
+		if (report->id == data[0])
+			switch (report->id) {
+			case 0x01: /* midi keys (qwerty)*/
+			case 0x03: /* midi keyboard (musical)*/
+			case 0x04: /* extra/midi keys (qwerty)*/
+				ret = pcmidi_handle_report(pk->pm,
+						report->id, data, size);
+				break;
+			}
+	}
+
+	return ret;
+}
+
+static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	int ret;
+	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+	unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+	unsigned long quirks = id->driver_data;
+	struct pk_device *pk;
+	struct pcmidi_snd *pm = NULL;
+
+	pk = kzalloc(sizeof(*pk), GFP_KERNEL);
+	if (pk == NULL) {
+		dev_err(&hdev->dev, "prodikeys: can't alloc descriptor\n");
+		return -ENOMEM;
+	}
+
+	pk->hdev = hdev;
+
+	pm = kzalloc(sizeof(*pm), GFP_KERNEL);
+	if (pm == NULL) {
+		dev_err(&hdev->dev,
+			"prodikeys: can't alloc descriptor\n");
+		ret = -ENOMEM;
+		goto err_free;
+	}
+
+	pm->pk = pk;
+	pk->pm = pm;
+	pm->ifnum = ifnum;
+
+	hid_set_drvdata(hdev, pk);
+
+	ret = hid_parse(hdev);
+	if (ret) {
+		dev_err(&hdev->dev, "prodikeys: hid parse failed\n");
+		goto err_free;
+	}
+
+	if (quirks & PK_QUIRK_NOGET) { /* hid_parse cleared all the quirks */
+		hdev->quirks |= HID_QUIRK_NOGET;
+	}
+
+	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+	if (ret) {
+		dev_err(&hdev->dev, "prodikeys: hw start failed\n");
+		goto err_free;
+	}
+
+	ret = pcmidi_snd_initialise(pm);
+	if (ret < 0)
+		goto err_stop;
+
+	return 0;
+err_stop:
+	hid_hw_stop(hdev);
+err_free:
+	kfree(pm);
+
+	kfree(pk);
+	return ret;
+}
+
+static void pk_remove(struct hid_device *hdev)
+{
+	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+	struct pcmidi_snd *pm;
+
+	pm = pk->pm;
+	if (pm) {
+		pcmidi_snd_terminate(pm);
+		kfree(pm);
+	}
+
+	hid_hw_stop(hdev);
+
+	kfree(pk);
+}
+
+static const struct hid_device_id pk_devices[] = {
+	{HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS,
+		USB_DEVICE_ID_PRODIKEYS_PCMIDI),
+	    .driver_data = PK_QUIRK_NOGET},
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, pk_devices);
+
+static struct hid_driver pk_driver = {
+	.name = "prodikeys",
+	.id_table = pk_devices,
+	.report_fixup = pk_report_fixup,
+	.input_mapping = pk_input_mapping,
+	.raw_event = pk_raw_event,
+	.probe = pk_probe,
+	.remove = pk_remove,
+};
+
+static int pk_init(void)
+{
+	int ret;
+
+	ret = hid_register_driver(&pk_driver);
+	if (ret)
+		printk(KERN_ERR "can't register prodikeys driver\n");
+
+	return ret;
+}
+
+static void pk_exit(void)
+{
+	hid_unregister_driver(&pk_driver);
+}
+
+module_init(pk_init);
+module_exit(pk_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
new file mode 100644
index 0000000..66e6940
--- /dev/null
+++ b/drivers/hid/hid-roccat-kone.c
@@ -0,0 +1,994 @@
+/*
+ * Roccat Kone driver for Linux
+ *
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Kone is a gamer mouse which consists of a mouse part and a keyboard
+ * part. The keyboard part enables the mouse to execute stored macros with mixed
+ * key- and button-events.
+ *
+ * TODO implement on-the-fly polling-rate change
+ *      The windows driver has the ability to change the polling rate of the
+ *      device on the press of a mousebutton.
+ *      Is it possible to remove and reinstall the urb in raw-event- or any
+ *      other handler, or to defer this action to be executed somewhere else?
+ *
+ * TODO implement notification mechanism for overlong macro execution
+ *      If user wants to execute an overlong macro only the names of macroset
+ *      and macro are given. Should userland tap hidraw or is there an
+ *      additional streaming mechanism?
+ *
+ * TODO is it possible to overwrite group for sysfs attributes via udev?
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/usb.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "hid-ids.h"
+#include "hid-roccat-kone.h"
+
+static void kone_set_settings_checksum(struct kone_settings *settings)
+{
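+	/*
+	 * The checksum is a plain 16-bit sum over all settings bytes except
+	 * the trailing little-endian checksum field itself.
+	 */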
+	uint16_t checksum = 0;
+	unsigned char *address = (unsigned char *)settings;
+	int i;
+
+	for (i = 0; i < sizeof(struct kone_settings) - 2; ++i, ++address)
+		checksum += *address;
+	settings->checksum = cpu_to_le16(checksum);
+}
+
+/*
+ * Checks success after writing data to mouse
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_check_write(struct usb_device *usb_dev)
+{
+	int len;
+	unsigned char *data;
+
+	data = kmalloc(1, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	do {
+		/*
+		 * Mouse needs 50 msecs until it says ok, but there are
+		 * 30 more msecs needed for next write to work.
+		 */
+		msleep(80);
+
+		len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+				USB_REQ_CLEAR_FEATURE,
+				USB_TYPE_CLASS | USB_RECIP_INTERFACE |
+				USB_DIR_IN,
+				kone_command_confirm_write, 0, data, 1,
+				USB_CTRL_SET_TIMEOUT);
+
+		if (len != 1) {
+			kfree(data);
+			return -EIO;
+		}
+
+		/*
+		 * value of 3 seems to mean something like
+		 * "not finished yet, but it looks good"
+		 * So check again after a moment.
+		 */
+	} while (*data == 3);
+
+	if (*data == 1) { /* everything alright */
+		kfree(data);
+		return 0;
+	} else { /* unknown answer */
+		dev_err(&usb_dev->dev, "got retval %d when checking write\n",
+				*data);
+		kfree(data);
+		return -EIO;
+	}
+}
+
+/*
+ * Reads settings from mouse and stores it in @buf
+ * @buf has to be alloced with GFP_KERNEL
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_settings(struct usb_device *usb_dev,
+		struct kone_settings *buf)
+{
+	int len;
+
+	len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+			USB_REQ_CLEAR_FEATURE,
+			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+			kone_command_settings, 0, buf,
+			sizeof(struct kone_settings), USB_CTRL_SET_TIMEOUT);
+
+	if (len != sizeof(struct kone_settings))
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * Writes settings from @buf to mouse
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_set_settings(struct usb_device *usb_dev,
+		struct kone_settings const *settings)
+{
+	int len;
+
+	len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+			USB_REQ_SET_CONFIGURATION,
+			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+			kone_command_settings, 0, (char *)settings,
+			sizeof(struct kone_settings),
+			USB_CTRL_SET_TIMEOUT);
+
+	if (len != sizeof(struct kone_settings))
+		return -EIO;
+
+	if (kone_check_write(usb_dev))
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * Reads profile data from mouse and stores it in @buf
+ * @number: profile number to read
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_profile(struct usb_device *usb_dev,
+		struct kone_profile *buf, int number)
+{
+	int len;
+
+	if (number < 1 || number > 5)
+		return -EINVAL;
+
+	len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+			USB_REQ_CLEAR_FEATURE,
+			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+			kone_command_profile, number, buf,
+			sizeof(struct kone_profile), USB_CTRL_SET_TIMEOUT);
+
+	if (len != sizeof(struct kone_profile))
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * Writes profile data to mouse.
+ * @number: profile number to write
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_set_profile(struct usb_device *usb_dev,
+		struct kone_profile const *profile, int number)
+{
+	int len;
+
+	if (number < 1 || number > 5)
+		return -EINVAL;
+
+	len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+			USB_REQ_SET_CONFIGURATION,
+			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+			kone_command_profile, number, (char *)profile,
+			sizeof(struct kone_profile),
+			USB_CTRL_SET_TIMEOUT);
+
+	if (len != sizeof(struct kone_profile))
+		return len;
+
+	if (kone_check_write(usb_dev))
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * Reads value of "fast-clip-weight" and stores it in @result
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_weight(struct usb_device *usb_dev, int *result)
+{
+	int len;
+	uint8_t *data;
+
+	data = kmalloc(1, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+			USB_REQ_CLEAR_FEATURE,
+			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+			kone_command_weight, 0, data, 1, USB_CTRL_SET_TIMEOUT);
+
+	if (len != 1) {
+		kfree(data);
+		return -EIO;
+	}
+	*result = (int)*data;
+	kfree(data);
+	return 0;
+}
+
+/*
+ * Reads firmware_version of mouse and stores it in @result
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
+{
+	int len;
+	unsigned char *data;
+
+	data = kmalloc(2, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+			USB_REQ_CLEAR_FEATURE,
+			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+			kone_command_firmware_version, 0, data, 2,
+			USB_CTRL_SET_TIMEOUT);
+
+	if (len != 2) {
+		kfree(data);
+		return -EIO;
+	}
+	*result = le16_to_cpup((__le16 *)data);
+	kfree(data);
+	return 0;
+}
+
+static ssize_t kone_sysfs_read_settings(struct kobject *kobj,
+		struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count) {
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+
+	if (off >= sizeof(struct kone_settings))
+		return 0;
+
+	if (off + count > sizeof(struct kone_settings))
+		count = sizeof(struct kone_settings) - off;
+
+	mutex_lock(&kone->kone_lock);
+	memcpy(buf, (char const *)&kone->settings + off, count);
+	mutex_unlock(&kone->kone_lock);
+
+	return count;
+}
+
+/*
+ * Writing settings automatically activates startup_profile.
+ * This function keeps values in kone_device up to date and assumes that in
+ * case of error the old data is still valid
+ */
+static ssize_t kone_sysfs_write_settings(struct kobject *kobj,
+		struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count) {
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+	int retval = 0, difference;
+
+	/* I need to get my data in one piece */
+	if (off != 0 || count != sizeof(struct kone_settings))
+		return -EINVAL;
+
+	mutex_lock(&kone->kone_lock);
+	difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings));
+	if (difference) {
+		retval = kone_set_settings(usb_dev,
+				(struct kone_settings const *)buf);
+		if (!retval)
+			memcpy(&kone->settings, buf,
+					sizeof(struct kone_settings));
+	}
+	mutex_unlock(&kone->kone_lock);
+
+	if (retval)
+		return retval;
+
+	/*
+	 * If we get here, treat settings as okay and update actual values
+	 * according to startup_profile
+	 */
+	kone->actual_profile = kone->settings.startup_profile;
+	kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+
+	return sizeof(struct kone_settings);
+}
+
+static ssize_t kone_sysfs_read_profilex(struct kobject *kobj,
+		struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count, int number) {
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+
+	if (off >= sizeof(struct kone_profile))
+		return 0;
+
+	if (off + count > sizeof(struct kone_profile))
+		count = sizeof(struct kone_profile) - off;
+
+	mutex_lock(&kone->kone_lock);
+	memcpy(buf, (char const *)&kone->profiles[number - 1] + off, count);
+	mutex_unlock(&kone->kone_lock);
+
+	return count;
+}
+
+static ssize_t kone_sysfs_read_profile1(struct kobject *kobj,
+		struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count) {
+	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 1);
+}
+
+static ssize_t kone_sysfs_read_profile2(struct kobject *kobj,
+		struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count) {
+	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 2);
+}
+
+static ssize_t kone_sysfs_read_profile3(struct kobject *kobj,
+		struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count) {
+	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 3);
+}
+
+static ssize_t kone_sysfs_read_profile4(struct kobject *kobj,
+		struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count) {
+	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 4);
+}
+
+static ssize_t kone_sysfs_read_profile5(struct kobject *kobj,
+		struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count) {
+	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 5);
+}
+
+/* Writes data only if different to stored data */
+static ssize_t kone_sysfs_write_profilex(struct kobject *kobj,
+		struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count, int number) {
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+	struct kone_profile *profile;
+	int retval = 0, difference;
+
+	/* I need to get my data in one piece */
+	if (off != 0 || count != sizeof(struct kone_profile))
+		return -EINVAL;
+
+	profile = &kone->profiles[number - 1];
+
+	mutex_lock(&kone->kone_lock);
+	difference = memcmp(buf, profile, sizeof(struct kone_profile));
+	if (difference) {
+		retval = kone_set_profile(usb_dev,
+				(struct kone_profile const *)buf, number);
+		if (!retval)
+			memcpy(profile, buf, sizeof(struct kone_profile));
+	}
+	mutex_unlock(&kone->kone_lock);
+
+	if (retval)
+		return retval;
+
+	return sizeof(struct kone_profile);
+}
+
+static ssize_t kone_sysfs_write_profile1(struct kobject *kobj,
+		struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count) {
+	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 1);
+}
+
+static ssize_t kone_sysfs_write_profile2(struct kobject *kobj,
+		struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count) {
+	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 2);
+}
+
+static ssize_t kone_sysfs_write_profile3(struct kobject *kobj,
+		struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count) {
+	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 3);
+}
+
+static ssize_t kone_sysfs_write_profile4(struct kobject *kobj,
+		struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count) {
+	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 4);
+}
+
+static ssize_t kone_sysfs_write_profile5(struct kobject *kobj,
+		struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count) {
+	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 5);
+}
+
+static ssize_t kone_sysfs_show_actual_profile(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+	return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_profile);
+}
+
+static ssize_t kone_sysfs_show_actual_dpi(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+	return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_dpi);
+}
+
+/* weight is read each time, since we don't get informed when it's changed */
+static ssize_t kone_sysfs_show_weight(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+	int weight = 0;
+	int retval;
+
+	mutex_lock(&kone->kone_lock);
+	retval = kone_get_weight(usb_dev, &weight);
+	mutex_unlock(&kone->kone_lock);
+
+	if (retval)
+		return retval;
+	return snprintf(buf, PAGE_SIZE, "%d\n", weight);
+}
+
+static ssize_t kone_sysfs_show_firmware_version(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+	return snprintf(buf, PAGE_SIZE, "%d\n", kone->firmware_version);
+}
+
+static ssize_t kone_sysfs_show_tcu(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+	return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.tcu);
+}
+
+static int kone_tcu_command(struct usb_device *usb_dev, int number)
+{
+	int len;
+	char *value;
+
+	value = kmalloc(1, GFP_KERNEL);
+	if (!value)
+		return -ENOMEM;
+
+	*value = number;
+
+	len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+			USB_REQ_SET_CONFIGURATION,
+			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+			kone_command_calibrate, 0, value, 1,
+			USB_CTRL_SET_TIMEOUT);
+
+	kfree(value);
+	return ((len != 1) ? -EIO : 0);
+}
+
+/*
+ * Calibrating the tcu is the only action that changes settings data inside the
+ * mouse, so this data needs to be reread
+ */
+static ssize_t kone_sysfs_set_tcu(struct device *dev,
+		struct device_attribute *attr, char const *buf, size_t size)
+{
+	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+	int retval;
+	unsigned long state;
+
+	retval = strict_strtoul(buf, 10, &state);
+	if (retval)
+		return retval;
+
+	if (state != 0 && state != 1)
+		return -EINVAL;
+
+	mutex_lock(&kone->kone_lock);
+
+	if (state == 1) { /* state activate */
+		retval = kone_tcu_command(usb_dev, 1);
+		if (retval)
+			goto exit_unlock;
+		retval = kone_tcu_command(usb_dev, 2);
+		if (retval)
+			goto exit_unlock;
+		ssleep(5); /* tcu needs this time for calibration */
+		retval = kone_tcu_command(usb_dev, 3);
+		if (retval)
+			goto exit_unlock;
+		retval = kone_tcu_command(usb_dev, 0);
+		if (retval)
+			goto exit_unlock;
+		retval = kone_tcu_command(usb_dev, 4);
+		if (retval)
+			goto exit_unlock;
+		/*
+		 * Kone needs this time to settle things.
+		 * Reading settings too early will result in invalid data.
+		 * Roccat's driver waits 1 sec, maybe this time could be
+		 * shortened.
+		 */
+		ssleep(1);
+	}
+
+	/* calibration changes values in settings, so reread */
+	retval = kone_get_settings(usb_dev, &kone->settings);
+	if (retval)
+		goto exit_no_settings;
+
+	/* only write settings back if activation state is different */
+	if (kone->settings.tcu != state) {
+		kone->settings.tcu = state;
+		kone_set_settings_checksum(&kone->settings);
+
+		retval = kone_set_settings(usb_dev, &kone->settings);
+		if (retval) {
+			dev_err(&usb_dev->dev, "couldn't set tcu state\n");
+			/*
+			 * try to reread valid settings into buffer overwriting
+			 * first error code
+			 */
+			retval = kone_get_settings(usb_dev, &kone->settings);
+			if (retval)
+				goto exit_no_settings;
+			goto exit_unlock;
+		}
+	}
+
+	retval = size;
+	goto exit_unlock;
+exit_no_settings:
+	dev_err(&usb_dev->dev, "couldn't read settings\n");
+exit_unlock:
+	mutex_unlock(&kone->kone_lock);
+	return retval;
+}
+
+static ssize_t kone_sysfs_show_startup_profile(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+	return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.startup_profile);
+}
+
+static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
+		struct device_attribute *attr, char const *buf, size_t size)
+{
+	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+	int retval;
+	unsigned long new_startup_profile;
+
+	retval = strict_strtoul(buf, 10, &new_startup_profile);
+	if (retval)
+		return retval;
+
+	if (new_startup_profile  < 1 || new_startup_profile > 5)
+		return -EINVAL;
+
+	mutex_lock(&kone->kone_lock);
+
+	kone->settings.startup_profile = new_startup_profile;
+	kone_set_settings_checksum(&kone->settings);
+
+	retval = kone_set_settings(usb_dev, &kone->settings);
+
+	mutex_unlock(&kone->kone_lock);
+
+	if (retval)
+		return retval;
+
+	/* changing the startup profile immediately activates this profile */
+	kone->actual_profile = new_startup_profile;
+	kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+
+	return size;
+}
+
+/*
+ * This file is used by userland software to find devices that are handled by
+ * this driver. This provides a consistent way across current and older kernels
+ * where this driver replaced usbhid instead of generic-usb.
+ * Driver capabilities can be determined from the version number.
+ */
+static ssize_t kone_sysfs_show_driver_version(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, ROCCAT_KONE_DRIVER_VERSION "\n");
+}
+
+/*
+ * Read actual dpi settings.
+ * Returns raw value for further processing. Refer to enum kone_polling_rates to
+ * get real value.
+ */
+static DEVICE_ATTR(actual_dpi, 0440, kone_sysfs_show_actual_dpi, NULL);
+
+static DEVICE_ATTR(actual_profile, 0440, kone_sysfs_show_actual_profile, NULL);
+
+/*
+ * The mouse can be equipped with one of four supplied weights from 5 to 20
+ * grams, which it recognizes, and the selected weight can be read out here.
+ * This returns the raw value reported by the mouse for easy evaluation by
+ * software. Refer to enum kone_weights to get corresponding real weight.
+ */
+static DEVICE_ATTR(weight, 0440, kone_sysfs_show_weight, NULL);
+
+/*
+ * Prints firmware version stored in mouse as integer.
+ * The raw value reported by the mouse is returned for easy evaluation, to get
+ * the real version number the decimal point has to be shifted 2 positions to
+ * the left. E.g. a value of 138 means 1.38.
+ */
+static DEVICE_ATTR(firmware_version, 0440,
+		kone_sysfs_show_firmware_version, NULL);
+
+/*
+ * Prints state of Tracking Control Unit as number where 0 = off and 1 = on
+ * Writing 0 deactivates tcu and writing 1 calibrates and activates the tcu
+ */
+static DEVICE_ATTR(tcu, 0660, kone_sysfs_show_tcu, kone_sysfs_set_tcu);
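+/*
+ * Example (path depends on the USB interface's sysfs location):
+ *   echo 1 > .../tcu   calibrates and activates the TCU
+ *   echo 0 > .../tcu   deactivates it
+ */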
+
+/* Prints and takes the number of the profile the mouse starts with */
+static DEVICE_ATTR(startup_profile, 0660,
+		kone_sysfs_show_startup_profile,
+		kone_sysfs_set_startup_profile);
+
+static DEVICE_ATTR(kone_driver_version, 0440,
+		kone_sysfs_show_driver_version, NULL);
+
+static struct attribute *kone_attributes[] = {
+		&dev_attr_actual_dpi.attr,
+		&dev_attr_actual_profile.attr,
+		&dev_attr_weight.attr,
+		&dev_attr_firmware_version.attr,
+		&dev_attr_tcu.attr,
+		&dev_attr_startup_profile.attr,
+		&dev_attr_kone_driver_version.attr,
+		NULL
+};
+
+static struct attribute_group kone_attribute_group = {
+		.attrs = kone_attributes
+};
+
+static struct bin_attribute kone_settings_attr = {
+	.attr = { .name = "settings", .mode = 0660 },
+	.size = sizeof(struct kone_settings),
+	.read = kone_sysfs_read_settings,
+	.write = kone_sysfs_write_settings
+};
+
+static struct bin_attribute kone_profile1_attr = {
+	.attr = { .name = "profile1", .mode = 0660 },
+	.size = sizeof(struct kone_profile),
+	.read = kone_sysfs_read_profile1,
+	.write = kone_sysfs_write_profile1
+};
+
+static struct bin_attribute kone_profile2_attr = {
+	.attr = { .name = "profile2", .mode = 0660 },
+	.size = sizeof(struct kone_profile),
+	.read = kone_sysfs_read_profile2,
+	.write = kone_sysfs_write_profile2
+};
+
+static struct bin_attribute kone_profile3_attr = {
+	.attr = { .name = "profile3", .mode = 0660 },
+	.size = sizeof(struct kone_profile),
+	.read = kone_sysfs_read_profile3,
+	.write = kone_sysfs_write_profile3
+};
+
+static struct bin_attribute kone_profile4_attr = {
+	.attr = { .name = "profile4", .mode = 0660 },
+	.size = sizeof(struct kone_profile),
+	.read = kone_sysfs_read_profile4,
+	.write = kone_sysfs_write_profile4
+};
+
+static struct bin_attribute kone_profile5_attr = {
+	.attr = { .name = "profile5", .mode = 0660 },
+	.size = sizeof(struct kone_profile),
+	.read = kone_sysfs_read_profile5,
+	.write = kone_sysfs_write_profile5
+};
+
+static int kone_create_sysfs_attributes(struct usb_interface *intf)
+{
+	int retval;
+
+	retval = sysfs_create_group(&intf->dev.kobj, &kone_attribute_group);
+	if (retval)
+		goto exit_1;
+
+	retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_settings_attr);
+	if (retval)
+		goto exit_2;
+
+	retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile1_attr);
+	if (retval)
+		goto exit_3;
+
+	retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile2_attr);
+	if (retval)
+		goto exit_4;
+
+	retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile3_attr);
+	if (retval)
+		goto exit_5;
+
+	retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile4_attr);
+	if (retval)
+		goto exit_6;
+
+	retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile5_attr);
+	if (retval)
+		goto exit_7;
+
+	return 0;
+
+exit_7:
+	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile4_attr);
+exit_6:
+	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile3_attr);
+exit_5:
+	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile2_attr);
+exit_4:
+	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile1_attr);
+exit_3:
+	sysfs_remove_bin_file(&intf->dev.kobj, &kone_settings_attr);
+exit_2:
+	sysfs_remove_group(&intf->dev.kobj, &kone_attribute_group);
+exit_1:
+	return retval;
+}
+
+static void kone_remove_sysfs_attributes(struct usb_interface *intf)
+{
+	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile5_attr);
+	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile4_attr);
+	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile3_attr);
+	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile2_attr);
+	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile1_attr);
+	sysfs_remove_bin_file(&intf->dev.kobj, &kone_settings_attr);
+	sysfs_remove_group(&intf->dev.kobj, &kone_attribute_group);
+}
+
+static int kone_init_kone_device_struct(struct usb_device *usb_dev,
+		struct kone_device *kone)
+{
+	uint i;
+	int retval;
+
+	mutex_init(&kone->kone_lock);
+
+	for (i = 0; i < 5; ++i) {
+		retval = kone_get_profile(usb_dev, &kone->profiles[i], i + 1);
+		if (retval)
+			return retval;
+	}
+
+	retval = kone_get_settings(usb_dev, &kone->settings);
+	if (retval)
+		return retval;
+
+	retval = kone_get_firmware_version(usb_dev, &kone->firmware_version);
+	if (retval)
+		return retval;
+
+	kone->actual_profile = kone->settings.startup_profile;
+	kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+
+	return 0;
+}
+
+/*
+ * Since the IGNORE_MOUSE quirk moved to hid-apple, there is no way to bind
+ * only to the mouse part if usbhid is compiled into the kernel and kone is
+ * compiled as a module.
+ * Special behaviour is bound only to the mouse part since only mouse events
+ * carry the additional notifications.
+ */
+static int kone_init_specials(struct hid_device *hdev)
+{
+	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+	struct usb_device *usb_dev = interface_to_usbdev(intf);
+	struct kone_device *kone;
+	int retval;
+
+	if (intf->cur_altsetting->desc.bInterfaceProtocol
+			== USB_INTERFACE_PROTOCOL_MOUSE) {
+
+		kone = kzalloc(sizeof(*kone), GFP_KERNEL);
+		if (!kone) {
+			dev_err(&hdev->dev, "can't alloc device descriptor\n");
+			return -ENOMEM;
+		}
+		hid_set_drvdata(hdev, kone);
+
+		retval = kone_init_kone_device_struct(usb_dev, kone);
+		if (retval) {
+			dev_err(&hdev->dev,
+					"couldn't init struct kone_device\n");
+			goto exit_free;
+		}
+		retval = kone_create_sysfs_attributes(intf);
+		if (retval) {
+			dev_err(&hdev->dev, "cannot create sysfs files\n");
+			goto exit_free;
+		}
+	} else {
+		hid_set_drvdata(hdev, NULL);
+	}
+
+	return 0;
+exit_free:
+	kfree(kone);
+	return retval;
+}
+
+
+static void kone_remove_specials(struct hid_device *hdev)
+{
+	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+	if (intf->cur_altsetting->desc.bInterfaceProtocol
+			== USB_INTERFACE_PROTOCOL_MOUSE) {
+		kone_remove_sysfs_attributes(intf);
+		kfree(hid_get_drvdata(hdev));
+	}
+}
+
+static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	int retval;
+
+	retval = hid_parse(hdev);
+	if (retval) {
+		dev_err(&hdev->dev, "parse failed\n");
+		goto exit;
+	}
+
+	retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+	if (retval) {
+		dev_err(&hdev->dev, "hw start failed\n");
+		goto exit;
+	}
+
+	retval = kone_init_specials(hdev);
+	if (retval) {
+		dev_err(&hdev->dev, "couldn't install mouse\n");
+		goto exit_stop;
+	}
+
+	return 0;
+
+exit_stop:
+	hid_hw_stop(hdev);
+exit:
+	return retval;
+}
+
+static void kone_remove(struct hid_device *hdev)
+{
+	kone_remove_specials(hdev);
+	hid_hw_stop(hdev);
+}
+
+/* handle special events and keep actual profile and dpi values up to date */
+static void kone_keep_values_up_to_date(struct kone_device *kone,
+		struct kone_mouse_event const *event)
+{
+	switch (event->event) {
+	case kone_mouse_event_switch_profile:
+	case kone_mouse_event_osd_profile:
+		kone->actual_profile = event->value;
+		kone->actual_dpi = kone->profiles[kone->actual_profile - 1].
+				startup_dpi;
+		break;
+	case kone_mouse_event_switch_dpi:
+	case kone_mouse_event_osd_dpi:
+		kone->actual_dpi = event->value;
+		break;
+	}
+}
+
+/*
+ * Is called for both the keyboard and the mouse part.
+ * Only the mouse part gets information about special events in its extended
+ * event structure.
+ */
+static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
+		u8 *data, int size)
+{
+	struct kone_device *kone = hid_get_drvdata(hdev);
+	struct kone_mouse_event *event = (struct kone_mouse_event *)data;
+
+	/* keyboard events are always processed by default handler */
+	if (size != sizeof(struct kone_mouse_event))
+		return 0;
+
+	/*
+	 * Firmware 1.38 introduced new behaviour for tilt and special buttons:
+	 * a pressed button is reported in each movement event.
+	 * This workaround sends only one event per press.
+	 */
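+	/* the 5 bytes span tilt, unknown, event, value and macro_key */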
+	if (memcmp(&kone->last_mouse_event.tilt, &event->tilt, 5))
+		memcpy(&kone->last_mouse_event, event,
+				sizeof(struct kone_mouse_event));
+	else
+		memset(&event->tilt, 0, 5);
+
+	kone_keep_values_up_to_date(kone, event);
+
+	return 0; /* always do further processing */
+}
+
+static const struct hid_device_id kone_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(hid, kone_devices);
+
+static struct hid_driver kone_driver = {
+		.name = "kone",
+		.id_table = kone_devices,
+		.probe = kone_probe,
+		.remove = kone_remove,
+		.raw_event = kone_raw_event
+};
+
+static int __init kone_init(void)
+{
+	return hid_register_driver(&kone_driver);
+}
+
+static void __exit kone_exit(void)
+{
+	hid_unregister_driver(&kone_driver);
+}
+
+module_init(kone_init);
+module_exit(kone_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Kone driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
new file mode 100644
index 0000000..b413b10
--- /dev/null
+++ b/drivers/hid/hid-roccat-kone.h
@@ -0,0 +1,224 @@
+#ifndef __HID_ROCCAT_KONE_H
+#define __HID_ROCCAT_KONE_H
+
+/*
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+#define ROCCAT_KONE_DRIVER_VERSION "v0.3.1"
+
+#pragma pack(push)
+#pragma pack(1)
+
+struct kone_keystroke {
+	uint8_t key;
+	uint8_t action;
+	uint16_t period; /* in milliseconds */
+};
+
+enum kone_keystroke_buttons {
+	kone_keystroke_button_1 = 0xf0, /* left mouse button */
+	kone_keystroke_button_2 = 0xf1, /* right mouse button */
+	kone_keystroke_button_3 = 0xf2, /* wheel */
+	kone_keystroke_button_9 = 0xf3, /* side button up */
+	kone_keystroke_button_8 = 0xf4 /* side button down */
+};
+
+enum kone_keystroke_actions {
+	kone_keystroke_action_press = 0,
+	kone_keystroke_action_release = 1
+};
+
+struct kone_button_info {
+	uint8_t number; /* range 1-8 */
+	uint8_t type;
+	uint8_t macro_type; /* 0 = short, 1 = overlong */
+	uint8_t macro_set_name[16]; /* can be max 15 chars long */
+	uint8_t macro_name[16]; /* can be max 15 chars long */
+	uint8_t count;
+	struct kone_keystroke keystrokes[20];
+};
+
+enum kone_button_info_types {
+	/* valid button types until firmware 1.32 */
+	kone_button_info_type_button_1 = 0x1, /* click (left mouse button) */
+	kone_button_info_type_button_2 = 0x2, /* menu (right mouse button) */
+	kone_button_info_type_button_3 = 0x3, /* scroll (wheel) */
+	kone_button_info_type_double_click = 0x4,
+	kone_button_info_type_key = 0x5,
+	kone_button_info_type_macro = 0x6,
+	kone_button_info_type_off = 0x7,
+	/* TODO clarify function and rename */
+	kone_button_info_type_osd_xy_prescaling = 0x8,
+	kone_button_info_type_osd_dpi = 0x9,
+	kone_button_info_type_osd_profile = 0xa,
+	kone_button_info_type_button_9 = 0xb, /* i.e. forward */
+	kone_button_info_type_button_8 = 0xc, /* i.e. backward */
+	kone_button_info_type_dpi_up = 0xd, /* internal */
+	kone_button_info_type_dpi_down = 0xe, /* internal */
+	kone_button_info_type_button_7 = 0xf, /* tilt left */
+	kone_button_info_type_button_6 = 0x10, /* tilt right */
+	kone_button_info_type_profile_up = 0x11, /* internal */
+	kone_button_info_type_profile_down = 0x12, /* internal */
+	/* additional valid button types since firmware 1.38 */
+	kone_button_info_type_multimedia_open_player = 0x20,
+	kone_button_info_type_multimedia_next_track = 0x21,
+	kone_button_info_type_multimedia_prev_track = 0x22,
+	kone_button_info_type_multimedia_play_pause = 0x23,
+	kone_button_info_type_multimedia_stop = 0x24,
+	kone_button_info_type_multimedia_mute = 0x25,
+	kone_button_info_type_multimedia_volume_up = 0x26,
+	kone_button_info_type_multimedia_volume_down = 0x27
+};
+
+enum kone_button_info_numbers {
+	kone_button_top = 1,
+	kone_button_wheel_tilt_left = 2,
+	kone_button_wheel_tilt_right = 3,
+	kone_button_forward = 4,
+	kone_button_backward = 5,
+	kone_button_middle = 6,
+	kone_button_plus = 7,
+	kone_button_minus = 8,
+};
+
+struct kone_light_info {
+	uint8_t number; /* number of light 1-5 */
+	uint8_t mod;   /* 1 = on, 2 = off */
+	uint8_t red;   /* range 0x00-0xff */
+	uint8_t green; /* range 0x00-0xff */
+	uint8_t blue;  /* range 0x00-0xff */
+};
+
+struct kone_profile {
+	uint16_t size; /* always 975 */
+	uint16_t unused; /* always 0 */
+
+	/*
+	 * range 1-5
+	 * This number does not need to correspond with the location where the
+	 * profile is saved.
+	 */
+	uint8_t profile; /* range 1-5 */
+
+	uint16_t main_sensitivity; /* range 100-1000 */
+	uint8_t xy_sensitivity_enabled; /* 1 = on, 2 = off */
+	uint16_t x_sensitivity; /* range 100-1000 */
+	uint16_t y_sensitivity; /* range 100-1000 */
+	uint8_t dpi_rate; /* bit 1 = 800, ... */
+	uint8_t startup_dpi; /* range 1-6 */
+	uint8_t polling_rate; /* 1 = 125Hz, 2 = 500Hz, 3 = 1000Hz */
+	/* kone has no dcu
+	 * value is always 2 in firmwares <= 1.32 and
+	 * 1 in firmwares > 1.32
+	 */
+	uint8_t dcu_flag;
+	uint8_t light_effect_1; /* range 1-3 */
+	uint8_t light_effect_2; /* range 1-5 */
+	uint8_t light_effect_3; /* range 1-4 */
+	uint8_t light_effect_speed; /* range 0-255 */
+
+	struct kone_light_info light_infos[5];
+	/* offset is kone_button_info_numbers - 1 */
+	struct kone_button_info button_infos[8];
+
+	uint16_t checksum; /* holds checksum of struct */
+};
+
+enum kone_polling_rates {
+	kone_polling_rate_125 = 1,
+	kone_polling_rate_500 = 2,
+	kone_polling_rate_1000 = 3
+};
+
+struct kone_settings {
+	uint16_t size; /* always 36 */
+	uint8_t  startup_profile; /* 1-5 */
+	uint8_t	 unknown1;
+	uint8_t  tcu; /* 0 = off, 1 = on */
+	uint8_t  unknown2[23];
+	uint8_t  calibration_data[4];
+	uint8_t  unknown3[2];
+	uint16_t checksum;
+};
+
+/*
+ * 12 byte mouse event read by interrupt_read
+ */
+struct kone_mouse_event {
+	uint8_t report_number; /* always 1 */
+	uint8_t button;
+	uint16_t x;
+	uint16_t y;
+	uint8_t wheel; /* up = 1, down = -1 */
+	uint8_t tilt; /* right = 1, left = -1 */
+	uint8_t unknown;
+	uint8_t event;
+	uint8_t value; /* press = 0, release = 1 */
+	uint8_t macro_key; /* 0 to 8 */
+};
+
+enum kone_mouse_events {
+	/* osd events are thought to be displayed on screen */
+	kone_mouse_event_osd_dpi = 0xa0,
+	kone_mouse_event_osd_profile = 0xb0,
+	/* TODO clarify meaning and occurrence of kone_mouse_event_calibration */
+	kone_mouse_event_calibration = 0xc0,
+	kone_mouse_event_call_overlong_macro = 0xe0,
+	/* switch events notify if the user changed values via mouse click */
+	kone_mouse_event_switch_dpi = 0xf0,
+	kone_mouse_event_switch_profile = 0xf1
+};
+
+enum kone_commands {
+	kone_command_profile = 0x5a,
+	kone_command_settings = 0x15a,
+	kone_command_firmware_version = 0x25a,
+	kone_command_weight = 0x45a,
+	kone_command_calibrate = 0x55a,
+	kone_command_confirm_write = 0x65a,
+	kone_command_firmware = 0xe5a
+};
+
+#pragma pack(pop)
+
+struct kone_device {
+	/*
+	 * Store the actual values when we are informed about changes, since
+	 * there is no way of getting this information from the device on demand.
+	 */
+	int actual_profile, actual_dpi;
+	/* Used for neutralizing abnormal button behaviour */
+	struct kone_mouse_event last_mouse_event;
+
+	/*
+	 * It's unlikely that multiple sysfs attributes are accessed at a time,
+	 * so a single mutex is used to protect hardware access as well as the
+	 * profiles and settings in this struct.
+	 */
+	struct mutex kone_lock;
+
+	/*
+	 * Storing the data here reduces IO and ensures that data is available
+	 * when it's needed (e.g. in the interrupt handler).
+	 */
+	struct kone_profile profiles[5];
+	struct kone_settings settings;
+
+	/*
+	 * The firmware version doesn't change unless a firmware update is
+	 * implemented, so it's read only once.
+	 */
+	int firmware_version;
+};
+
+#endif
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index 510dd13..bda0fd6 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -7,6 +7,18 @@
  *  Copyright (c) 2006-2007 Jiri Kosina
  *  Copyright (c) 2007 Paul Walmsley
  *  Copyright (c) 2008 Jiri Slaby
+ *  Copyright (c) 2010 Don Prince <dhprince.devel@yahoo.co.uk>
+ *
+ *
+ *  This driver supports several HID devices:
+ *
+ *  [0419:0001] Samsung IrDA remote controller (reports as Cypress USB Mouse).
+ *	various hid report fixups for different variants.
+ *
+ *  [0419:0600] Creative Desktop Wireless 6000 keyboard/mouse combo
+ *	several key mappings used from the consumer usage page
+ *	deviate from the USB HUT 1.12 standard.
+ *
  */
 
 /*
@@ -17,14 +29,13 @@
  */
 
 #include <linux/device.h>
+#include <linux/usb.h>
 #include <linux/hid.h>
 #include <linux/module.h>
 
 #include "hid-ids.h"
 
 /*
- * Samsung IrDA remote controller (reports as Cypress USB Mouse).
- *
  * There are several variants for 0419:0001:
  *
  * 1. 184 byte report descriptor
@@ -43,21 +54,21 @@
  * 4. 171 byte report descriptor
  * Report #3 has an array field with logical range 0..1 instead of 1..3.
  */
-static inline void samsung_dev_trace(struct hid_device *hdev,
+static inline void samsung_irda_dev_trace(struct hid_device *hdev,
 		unsigned int rsize)
 {
 	dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
 			"descriptor\n", rsize);
 }
 
-static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static void samsung_irda_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 		unsigned int rsize)
 {
 	if (rsize == 184 && rdesc[175] == 0x25 && rdesc[176] == 0x40 &&
 			rdesc[177] == 0x75 && rdesc[178] == 0x30 &&
 			rdesc[179] == 0x95 && rdesc[180] == 0x01 &&
 			rdesc[182] == 0x40) {
-		samsung_dev_trace(hdev, 184);
+		samsung_irda_dev_trace(hdev, 184);
 		rdesc[176] = 0xff;
 		rdesc[178] = 0x08;
 		rdesc[180] = 0x06;
@@ -65,24 +76,80 @@
 	} else
 	if (rsize == 203 && rdesc[192] == 0x15 && rdesc[193] == 0x0 &&
 			rdesc[194] == 0x25 && rdesc[195] == 0x12) {
-		samsung_dev_trace(hdev, 203);
+		samsung_irda_dev_trace(hdev, 203);
 		rdesc[193] = 0x1;
 		rdesc[195] = 0xf;
 	} else
 	if (rsize == 135 && rdesc[124] == 0x15 && rdesc[125] == 0x0 &&
 			rdesc[126] == 0x25 && rdesc[127] == 0x11) {
-		samsung_dev_trace(hdev, 135);
+		samsung_irda_dev_trace(hdev, 135);
 		rdesc[125] = 0x1;
 		rdesc[127] = 0xe;
 	} else
 	if (rsize == 171 && rdesc[160] == 0x15 && rdesc[161] == 0x0 &&
 			rdesc[162] == 0x25 && rdesc[163] == 0x01) {
-		samsung_dev_trace(hdev, 171);
+		samsung_irda_dev_trace(hdev, 171);
 		rdesc[161] = 0x1;
 		rdesc[163] = 0x3;
 	}
 }
 
+#define samsung_kbd_mouse_map_key_clear(c) \
+	hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+
+static int samsung_kbd_mouse_input_mapping(struct hid_device *hdev,
+	struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
+	unsigned long **bit, int *max)
+{
+	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+	unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
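+	/* remap only consumer-page usages reported on interface 1 */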
+	if (1 != ifnum || HID_UP_CONSUMER != (usage->hid & HID_USAGE_PAGE))
+		return 0;
+
+	dbg_hid("samsung wireless keyboard/mouse input mapping event [0x%x]\n",
+		usage->hid & HID_USAGE);
+
+	switch (usage->hid & HID_USAGE) {
+	/* report 2 */
+	case 0x183: samsung_kbd_mouse_map_key_clear(KEY_MEDIA); break;
+	case 0x195: samsung_kbd_mouse_map_key_clear(KEY_EMAIL);	break;
+	case 0x196: samsung_kbd_mouse_map_key_clear(KEY_CALC); break;
+	case 0x197: samsung_kbd_mouse_map_key_clear(KEY_COMPUTER); break;
+	case 0x22b: samsung_kbd_mouse_map_key_clear(KEY_SEARCH); break;
+	case 0x22c: samsung_kbd_mouse_map_key_clear(KEY_WWW); break;
+	case 0x22d: samsung_kbd_mouse_map_key_clear(KEY_BACK); break;
+	case 0x22e: samsung_kbd_mouse_map_key_clear(KEY_FORWARD); break;
+	case 0x22f: samsung_kbd_mouse_map_key_clear(KEY_FAVORITES); break;
+	case 0x230: samsung_kbd_mouse_map_key_clear(KEY_REFRESH); break;
+	case 0x231: samsung_kbd_mouse_map_key_clear(KEY_STOP); break;
+	default:
+		return 0;
+	}
+
+	return 1;
+}
+
+static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+	unsigned int rsize)
+{
+	if (USB_DEVICE_ID_SAMSUNG_IR_REMOTE == hdev->product)
+		samsung_irda_report_fixup(hdev, rdesc, rsize);
+}
+
+static int samsung_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+	struct hid_field *field, struct hid_usage *usage,
+	unsigned long **bit, int *max)
+{
+	int ret = 0;
+
+	if (USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE == hdev->product)
+		ret = samsung_kbd_mouse_input_mapping(hdev,
+			hi, field, usage, bit, max);
+
+	return ret;
+}
+
 static int samsung_probe(struct hid_device *hdev,
 		const struct hid_device_id *id)
 {
@@ -95,10 +162,12 @@
 		goto err_free;
 	}
 
-	if (hdev->rsize == 184) {
-		/* disable hidinput, force hiddev */
-		cmask = (cmask & ~HID_CONNECT_HIDINPUT) |
-			HID_CONNECT_HIDDEV_FORCE;
+	if (USB_DEVICE_ID_SAMSUNG_IR_REMOTE == hdev->product) {
+		if (hdev->rsize == 184) {
+			/* disable hidinput, force hiddev */
+			cmask = (cmask & ~HID_CONNECT_HIDINPUT) |
+				HID_CONNECT_HIDDEV_FORCE;
+		}
 	}
 
 	ret = hid_hw_start(hdev, cmask);
@@ -114,6 +183,7 @@
 
 static const struct hid_device_id samsung_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, samsung_devices);
@@ -122,6 +192,7 @@
 	.name = "samsung",
 	.id_table = samsung_devices,
 	.report_fixup = samsung_report_fixup,
+	.input_mapping = samsung_input_mapping,
 	.probe = samsung_probe,
 };
 
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 6925eda..2eebdcc 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -3,6 +3,9 @@
  *
  *  Copyright (c) 2008 Lev Babiev
  *  based on hid-cherry driver
+ *
+ *  Modified to also support BTC "Emprex 3009URF III Vista MCE Remote" by
+ *  Wayne Thomas 2010.
  */
 
 /*
@@ -24,23 +27,29 @@
 		struct hid_field *field, struct hid_usage *usage,
 		unsigned long **bit, int *max)
 {
-	if ((usage->hid & HID_USAGE_PAGE) != 0x0ffbc0000)
+	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
 		return 0;
 
 	switch (usage->hid & HID_USAGE) {
-        case 0x00d: ts_map_key_clear(KEY_HOME);           break;
-        case 0x024: ts_map_key_clear(KEY_MENU);           break;
-        case 0x025: ts_map_key_clear(KEY_TV);             break;
-        case 0x048: ts_map_key_clear(KEY_RED);            break;
-        case 0x047: ts_map_key_clear(KEY_GREEN);          break;
-        case 0x049: ts_map_key_clear(KEY_YELLOW);         break;
-        case 0x04a: ts_map_key_clear(KEY_BLUE);           break;
-        case 0x04b: ts_map_key_clear(KEY_ANGLE);          break;
-        case 0x04c: ts_map_key_clear(KEY_LANGUAGE);       break;
-        case 0x04d: ts_map_key_clear(KEY_SUBTITLE);       break;
-        case 0x031: ts_map_key_clear(KEY_AUDIO);          break;
-        case 0x032: ts_map_key_clear(KEY_TEXT);           break;
-        case 0x033: ts_map_key_clear(KEY_CHANNEL);        break;
+	case 0x00d: ts_map_key_clear(KEY_MEDIA);	break;
+	case 0x024: ts_map_key_clear(KEY_MENU);		break;
+	case 0x025: ts_map_key_clear(KEY_TV);		break;
+	case 0x031: ts_map_key_clear(KEY_AUDIO);	break;
+	case 0x032: ts_map_key_clear(KEY_TEXT);		break;
+	case 0x033: ts_map_key_clear(KEY_CHANNEL);	break;
+	case 0x047: ts_map_key_clear(KEY_MP3);		break;
+	case 0x048: ts_map_key_clear(KEY_TV2);		break;
+	case 0x049: ts_map_key_clear(KEY_CAMERA);	break;
+	case 0x04a: ts_map_key_clear(KEY_VIDEO);	break;
+	case 0x04b: ts_map_key_clear(KEY_ANGLE);	break;
+	case 0x04c: ts_map_key_clear(KEY_LANGUAGE);	break;
+	case 0x04d: ts_map_key_clear(KEY_SUBTITLE);	break;
+	case 0x050: ts_map_key_clear(KEY_RADIO);	break;
+	case 0x05a: ts_map_key_clear(KEY_TEXT);		break;
+	case 0x05b: ts_map_key_clear(KEY_RED);		break;
+	case 0x05c: ts_map_key_clear(KEY_GREEN);	break;
+	case 0x05d: ts_map_key_clear(KEY_YELLOW);	break;
+	case 0x05e: ts_map_key_clear(KEY_BLUE);		break;
 	default:
 		return 0;
 	}
@@ -50,6 +59,7 @@
 
 static const struct hid_device_id ts_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, ts_devices);
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index f947d83..1e051f1 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -22,14 +22,159 @@
 #include <linux/hid.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+#include <linux/power_supply.h>
+#endif
 
 #include "hid-ids.h"
 
 struct wacom_data {
 	__u16 tool;
 	unsigned char butstate;
+	unsigned char high_speed;
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+	int battery_capacity;
+	struct power_supply battery;
+	struct power_supply ac;
+#endif
 };
 
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+/*percent of battery capacity, 0 means AC online*/
+static unsigned short batcap[8] = { 1, 15, 25, 35, 50, 70, 100, 0 };
+
+static enum power_supply_property wacom_battery_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_CAPACITY
+};
+
+static enum power_supply_property wacom_ac_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE
+};
+
+static int wacom_battery_get_property(struct power_supply *psy,
+				enum power_supply_property psp,
+				union power_supply_propval *val)
+{
+	struct wacom_data *wdata = container_of(psy,
+					struct wacom_data, battery);
+	int power_state = batcap[wdata->battery_capacity];
+	int ret = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = 1;
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		/* show 100% battery capacity when charging */
+		if (power_state == 0)
+			val->intval = 100;
+		else
+			val->intval = power_state;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int wacom_ac_get_property(struct power_supply *psy,
+				enum power_supply_property psp,
+				union power_supply_propval *val)
+{
+	struct wacom_data *wdata = container_of(psy, struct wacom_data, ac);
+	int power_state = batcap[wdata->battery_capacity];
+	int ret = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		/* fall through */
+	case POWER_SUPPLY_PROP_ONLINE:
+		if (power_state == 0)
+			val->intval = 1;
+		else
+			val->intval = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+#endif
+
+static void wacom_poke(struct hid_device *hdev, u8 speed)
+{
+	struct wacom_data *wdata = hid_get_drvdata(hdev);
+	int limit, ret;
+	char rep_data[2];
+
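+	/* 0x03, 0x00 switches the tablet into Wacom mode 2 */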
+	rep_data[0] = 0x03; rep_data[1] = 0x00;
+	limit = 3;
+	do {
+		ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+				HID_FEATURE_REPORT);
+	} while (ret < 0 && limit-- > 0);
+
+	if (ret >= 0) {
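+		/* 0x05 selects low reporting speed, 0x06 high speed */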
+		if (speed == 0)
+			rep_data[0] = 0x05;
+		else
+			rep_data[0] = 0x06;
+
+		rep_data[1] = 0x00;
+		limit = 3;
+		do {
+			ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+					HID_FEATURE_REPORT);
+		} while (ret < 0 && limit-- > 0);
+
+		if (ret >= 0) {
+			wdata->high_speed = speed;
+			return;
+		}
+	}
+
+	/*
+	 * Note that if the raw queries fail, it's not a hard failure and it
+	 * is safe to continue
+	 */
+	dev_warn(&hdev->dev, "failed to poke device, command %d, err %d\n",
+				rep_data[0], ret);
+	return;
+}
+
+static ssize_t wacom_show_speed(struct device *dev,
+				struct device_attribute
+				*attr, char *buf)
+{
+	struct wacom_data *wdata = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%i\n", wdata->high_speed);
+}
+
+static ssize_t wacom_store_speed(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	int new_speed;
+
+	if (sscanf(buf, "%1d", &new_speed) != 1)
+		return -EINVAL;
+
+	if (new_speed == 0 || new_speed == 1) {
+		wacom_poke(hdev, new_speed);
+		return strnlen(buf, PAGE_SIZE);
+	} else
+		return -EINVAL;
+}
+
+static DEVICE_ATTR(speed, S_IRUGO | S_IWUGO,
+		wacom_show_speed, wacom_store_speed);
+
 static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
 		u8 *raw_data, int size)
 {
@@ -148,6 +293,12 @@
 		input_sync(input);
 	}
 
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+	/* Store current battery capacity */
+	rw = (data[7] >> 2 & 0x07);
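+	/* 3-bit level, used as index into batcap[] by the power supply code */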
+	if (rw != wdata->battery_capacity)
+		wdata->battery_capacity = rw;
+#endif
 	return 1;
 }
 
@@ -157,9 +308,7 @@
 	struct hid_input *hidinput;
 	struct input_dev *input;
 	struct wacom_data *wdata;
-	char rep_data[2];
 	int ret;
-	int limit;
 
 	wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
 	if (wdata == NULL) {
@@ -182,31 +331,53 @@
 		goto err_free;
 	}
 
-	/*
-	 * Note that if the raw queries fail, it's not a hard failure and it
-	 * is safe to continue
-	 */
+	ret = device_create_file(&hdev->dev, &dev_attr_speed);
+	if (ret)
+		dev_warn(&hdev->dev,
+			"can't create sysfs speed attribute err: %d\n", ret);
 
-	/* Set Wacom mode2 */
-	rep_data[0] = 0x03; rep_data[1] = 0x00;
-	limit = 3;
-	do {
-		ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
-				HID_FEATURE_REPORT);
-	} while (ret < 0 && limit-- > 0);
-	if (ret < 0)
-		dev_warn(&hdev->dev, "failed to poke device #1, %d\n", ret);
+	/* Set Wacom mode 2 with high reporting speed */
+	wacom_poke(hdev, 1);
 
-	/* 0x06 - high reporting speed, 0x05 - low speed */
-	rep_data[0] = 0x06; rep_data[1] = 0x00;
-	limit = 3;
-	do {
-		ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
-				HID_FEATURE_REPORT);
-	} while (ret < 0 && limit-- > 0);
-	if (ret < 0)
-		dev_warn(&hdev->dev, "failed to poke device #2, %d\n", ret);
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+	wdata->battery.properties = wacom_battery_props;
+	wdata->battery.num_properties = ARRAY_SIZE(wacom_battery_props);
+	wdata->battery.get_property = wacom_battery_get_property;
+	wdata->battery.name = "wacom_battery";
+	wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+	wdata->battery.use_for_apm = 0;
 
+	ret = power_supply_register(&hdev->dev, &wdata->battery);
+	if (ret) {
+		dev_warn(&hdev->dev,
+			"can't create sysfs battery attribute, err: %d\n", ret);
+		/*
+		 * battery attribute is not critical for the tablet, but if it
+		 * failed then there is no need to create the ac attribute
+		 */
+		goto move_on;
+	}
+
+	wdata->ac.properties = wacom_ac_props;
+	wdata->ac.num_properties = ARRAY_SIZE(wacom_ac_props);
+	wdata->ac.get_property = wacom_ac_get_property;
+	wdata->ac.name = "wacom_ac";
+	wdata->ac.type = POWER_SUPPLY_TYPE_MAINS;
+	wdata->ac.use_for_apm = 0;
+
+	ret = power_supply_register(&hdev->dev, &wdata->ac);
+	if (ret) {
+		dev_warn(&hdev->dev,
+			"can't create ac battery attribute, err: %d\n", ret);
+		/*
+		 * ac attribute is not critical for the tablet, but if it
+		 * failed then we don't want the battery attribute to exist
+		 */
+		power_supply_unregister(&wdata->battery);
+	}
+
+move_on:
+#endif
 	hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
 	input = hidinput->input;
 
@@ -251,13 +422,21 @@
 
 static void wacom_remove(struct hid_device *hdev)
 {
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+	struct wacom_data *wdata = hid_get_drvdata(hdev);
+#endif
 	hid_hw_stop(hdev);
+
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+	power_supply_unregister(&wdata->battery);
+	power_supply_unregister(&wdata->ac);
+#endif
 	kfree(hid_get_drvdata(hdev));
 }
 
 static const struct hid_device_id wacom_devices[] = {
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
-
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, wacom_devices);
diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c
new file mode 100644
index 0000000..9e8d35a
--- /dev/null
+++ b/drivers/hid/hid-zydacron.c
@@ -0,0 +1,237 @@
+/*
+*  HID driver for the Zydacron remote control
+*
+*  Copyright (c) 2010 Don Prince <dhprince.devel@yahoo.co.uk>
+*/
+
+/*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation; either version 2 of the License, or (at your option)
+* any later version.
+*/
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+struct zc_device {
+	struct input_dev	*input_ep81;
+	unsigned short		last_key[4];
+};
+
+
+/*
+* Zydacron remote control has an invalid HID report descriptor,
+* that needs fixing before we can parse it.
+*/
+static void zc_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+	unsigned int rsize)
+{
+	if (rsize >= 253 &&
+		rdesc[0x96] == 0xbc && rdesc[0x97] == 0xff &&
+		rdesc[0xca] == 0xbc && rdesc[0xcb] == 0xff &&
+		rdesc[0xe1] == 0xbc && rdesc[0xe2] == 0xff) {
+			dev_info(&hdev->dev,
+				"fixing up zydacron remote control report "
+				"descriptor\n");
+			rdesc[0x96] = rdesc[0xca] = rdesc[0xe1] = 0x0c;
+			rdesc[0x97] = rdesc[0xcb] = rdesc[0xe2] = 0x00;
+		}
+}
+
+#define zc_map_key_clear(c) \
+	hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+
+static int zc_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+	struct hid_field *field, struct hid_usage *usage,
+	unsigned long **bit, int *max)
+{
+	int i;
+	struct zc_device *zc = hid_get_drvdata(hdev);
+	zc->input_ep81 = hi->input;
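+	/* remembered so zc_raw_event() can inject key events on this input */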
+
+	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
+		return 0;
+
+	dbg_hid("zydacron input mapping event [0x%x]\n",
+		usage->hid & HID_USAGE);
+
+	switch (usage->hid & HID_USAGE) {
+	/* report 2 */
+	case 0x10:
+		zc_map_key_clear(KEY_MODE);
+		break;
+	case 0x30:
+		zc_map_key_clear(KEY_SCREEN);
+		break;
+	case 0x70:
+		zc_map_key_clear(KEY_INFO);
+		break;
+	/* report 3 */
+	case 0x04:
+		zc_map_key_clear(KEY_RADIO);
+		break;
+	/* report 4 */
+	case 0x0d:
+		zc_map_key_clear(KEY_PVR);
+		break;
+	case 0x25:
+		zc_map_key_clear(KEY_TV);
+		break;
+	case 0x47:
+		zc_map_key_clear(KEY_AUDIO);
+		break;
+	case 0x49:
+		zc_map_key_clear(KEY_AUX);
+		break;
+	case 0x4a:
+		zc_map_key_clear(KEY_VIDEO);
+		break;
+	case 0x48:
+		zc_map_key_clear(KEY_DVD);
+		break;
+	case 0x24:
+		zc_map_key_clear(KEY_MENU);
+		break;
+	case 0x32:
+		zc_map_key_clear(KEY_TEXT);
+		break;
+	default:
+		return 0;
+	}
+
+	for (i = 0; i < 4; i++)
+		zc->last_key[i] = 0;
+
+	return 1;
+}
+
+static int zc_raw_event(struct hid_device *hdev, struct hid_report *report,
+	 u8 *data, int size)
+{
+	struct zc_device *zc = hid_get_drvdata(hdev);
+	int ret = 0;
+	unsigned key;
+	unsigned short index;
+
+	if (report->id == data[0]) {
+
+		/* break keys */
+		for (index = 0; index < 4; index++) {
+			key = zc->last_key[index];
+			if (key) {
+				input_event(zc->input_ep81, EV_KEY, key, 0);
+				zc->last_key[index] = 0;
+			}
+		}
+
+		key = 0;
+		switch (report->id) {
+		case 0x02:
+		case 0x03:
+			switch (data[1]) {
+			case 0x10:
+				key = KEY_MODE;
+				index = 0;
+				break;
+			case 0x30:
+				key = KEY_SCREEN;
+				index = 1;
+				break;
+			case 0x70:
+				key = KEY_INFO;
+				index = 2;
+				break;
+			case 0x04:
+				key = KEY_RADIO;
+				index = 3;
+				break;
+			}
+
+			if (key) {
+				input_event(zc->input_ep81, EV_KEY, key, 1);
+				zc->last_key[index] = key;
+			}
+
+			ret = 1;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int zc_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	int ret;
+	struct zc_device *zc;
+
+	zc = kzalloc(sizeof(*zc), GFP_KERNEL);
+	if (zc == NULL) {
+		dev_err(&hdev->dev, "zydacron: can't alloc descriptor\n");
+		return -ENOMEM;
+	}
+
+	hid_set_drvdata(hdev, zc);
+
+	ret = hid_parse(hdev);
+	if (ret) {
+		dev_err(&hdev->dev, "zydacron: parse failed\n");
+		goto err_free;
+	}
+
+	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+	if (ret) {
+		dev_err(&hdev->dev, "zydacron: hw start failed\n");
+		goto err_free;
+	}
+
+	return 0;
+err_free:
+	kfree(zc);
+
+	return ret;
+}
+
+static void zc_remove(struct hid_device *hdev)
+{
+	struct zc_device *zc = hid_get_drvdata(hdev);
+
+	hid_hw_stop(hdev);
+
+	if (NULL != zc)
+		kfree(zc);
+}
+
+static const struct hid_device_id zc_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, zc_devices);
+
+static struct hid_driver zc_driver = {
+	.name = "zydacron",
+	.id_table = zc_devices,
+	.report_fixup = zc_report_fixup,
+	.input_mapping = zc_input_mapping,
+	.raw_event = zc_raw_event,
+	.probe = zc_probe,
+	.remove = zc_remove,
+};
+
+static int __init zc_init(void)
+{
+	return hid_register_driver(&zc_driver);
+}
+
+static void __exit zc_exit(void)
+{
+	hid_unregister_driver(&zc_driver);
+}
+
+module_init(zc_init);
+module_exit(zc_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 6eadf1a..3ccd478 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -106,38 +106,48 @@
 static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
 {
 	unsigned int minor = iminor(file->f_path.dentry->d_inode);
-	/* FIXME: What stops hidraw_table going NULL */
-	struct hid_device *dev = hidraw_table[minor]->hid;
+	struct hid_device *dev;
 	__u8 *buf;
 	int ret = 0;
 
-	if (!dev->hid_output_raw_report)
-		return -ENODEV;
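+	/* hold minors_lock so hidraw_table[minor] stays valid while in use */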
+	mutex_lock(&minors_lock);
+	dev = hidraw_table[minor]->hid;
+
+	if (!dev->hid_output_raw_report) {
+		ret = -ENODEV;
+		goto out;
+	}
 
 	if (count > HID_MAX_BUFFER_SIZE) {
 		printk(KERN_WARNING "hidraw: pid %d passed too large report\n",
 				task_pid_nr(current));
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	if (count < 2) {
 		printk(KERN_WARNING "hidraw: pid %d passed too short report\n",
 				task_pid_nr(current));
-		return -EINVAL;
-	}
-
-	buf = kmalloc(count * sizeof(__u8), GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	if (copy_from_user(buf, buffer, count)) {
-		ret = -EFAULT;
+		ret = -EINVAL;
 		goto out;
 	}
 
+	buf = kmalloc(count * sizeof(__u8), GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (copy_from_user(buf, buffer, count)) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+
 	ret = dev->hid_output_raw_report(dev, buf, count, HID_OUTPUT_REPORT);
-out:
+out_free:
 	kfree(buf);
+out:
+	mutex_unlock(&minors_lock);
 	return ret;
 }
 
@@ -165,11 +175,8 @@
 		goto out;
 	}
 
-	lock_kernel();
 	mutex_lock(&minors_lock);
 	if (!hidraw_table[minor]) {
-		printk(KERN_EMERG "hidraw device with minor %d doesn't exist\n",
-				minor);
 		kfree(list);
 		err = -ENODEV;
 		goto out_unlock;
@@ -197,7 +204,6 @@
 
 out_unlock:
 	mutex_unlock(&minors_lock);
-	unlock_kernel();
 out:
 	return err;
 
@@ -209,11 +215,8 @@
 	struct hidraw *dev;
 	struct hidraw_list *list = file->private_data;
 
-	if (!hidraw_table[minor]) {
-		printk(KERN_EMERG "hidraw device with minor %d doesn't exist\n",
-				minor);
+	if (!hidraw_table[minor])
 		return -ENODEV;
-	}
 
 	list_del(&list->node);
 	dev = hidraw_table[minor];
@@ -238,11 +241,12 @@
 	struct inode *inode = file->f_path.dentry->d_inode;
 	unsigned int minor = iminor(inode);
 	long ret = 0;
-	/* FIXME: What stops hidraw_table going NULL */
-	struct hidraw *dev = hidraw_table[minor];
+	struct hidraw *dev;
 	void __user *user_arg = (void __user*) arg;
 
-	lock_kernel();
+	mutex_lock(&minors_lock);
+	dev = hidraw_table[minor];
+
 	switch (cmd) {
 		case HIDIOCGRDESCSIZE:
 			if (put_user(dev->hid->rsize, (int __user *)arg))
@@ -311,11 +315,11 @@
 						-EFAULT : len;
 					break;
 				}
-                }
+		}
 
 		ret = -ENOTTY;
 	}
-	unlock_kernel();
+	mutex_unlock(&minors_lock);
 	return ret;
 }
 
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index df61975..1ebd324 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -623,6 +623,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(usbhid_wait_io);
 
 static int hid_set_idle(struct usb_device *dev, int ifnum, int report, int idle)
 {
@@ -806,16 +807,36 @@
 	struct usb_host_interface *interface = intf->cur_altsetting;
 	int ret;
 
-	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
-		HID_REQ_SET_REPORT,
-		USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-		((report_type + 1) << 8) | *buf,
-		interface->desc.bInterfaceNumber, buf + 1, count - 1,
-		USB_CTRL_SET_TIMEOUT);
-
-	/* count also the report id */
-	if (ret > 0)
-		ret++;
+	if (usbhid->urbout) {
+		int actual_length;
+		int skipped_report_id = 0;
+		if (buf[0] == 0x0) {
+			/* Don't send the Report ID */
+			buf++;
+			count--;
+			skipped_report_id = 1;
+		}
+		ret = usb_interrupt_msg(dev, usbhid->urbout->pipe,
+			buf, count, &actual_length,
+			USB_CTRL_SET_TIMEOUT);
+		/* return the number of bytes transferred */
+		if (ret == 0) {
+			ret = actual_length;
+			/* count also the report id */
+			if (skipped_report_id)
+				ret++;
+		}
+	} else {
+		ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+			HID_REQ_SET_REPORT,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			((report_type + 1) << 8) | *buf,
+			interface->desc.bInterfaceNumber, buf + 1, count - 1,
+			USB_CTRL_SET_TIMEOUT);
+		/* count also the report id */
+		if (ret > 0)
+			ret++;
+	}
 
 	return ret;
 }
@@ -1017,12 +1038,15 @@
 	/* Some keyboards don't work until their LEDs have been set.
 	 * Since BIOSes do set the LEDs, it must be safe for any device
 	 * that supports the keyboard boot protocol.
+	 * In addition, enable remote wakeup by default for all keyboard
+	 * devices supporting the boot protocol.
 	 */
 	if (interface->desc.bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT &&
 			interface->desc.bInterfaceProtocol ==
-				USB_INTERFACE_PROTOCOL_KEYBOARD)
+				USB_INTERFACE_PROTOCOL_KEYBOARD) {
 		usbhid_set_leds(hid);
-
+		device_set_wakeup_enable(&dev->dev, 1);
+	}
 	return 0;
 
 fail:
@@ -1131,6 +1155,7 @@
 	hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
 	hid->product = le16_to_cpu(dev->descriptor.idProduct);
 	hid->name[0] = 0;
+	hid->quirks = usbhid_lookup_quirk(hid->vendor, hid->product);
 	if (intf->cur_altsetting->desc.bInterfaceProtocol ==
 			USB_INTERFACE_PROTOCOL_MOUSE)
 		hid->type = HID_TYPE_USBMOUSE;
@@ -1287,6 +1312,11 @@
 		{
 			set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
 			spin_unlock_irq(&usbhid->lock);
+			if (hid->driver && hid->driver->suspend) {
+				status = hid->driver->suspend(hid, message);
+				if (status < 0)
+					return status;
+			}
 		} else {
 			usbhid_mark_busy(usbhid);
 			spin_unlock_irq(&usbhid->lock);
@@ -1294,6 +1324,11 @@
 		}
 
 	} else {
+		if (hid->driver && hid->driver->suspend) {
+			status = hid->driver->suspend(hid, message);
+			if (status < 0)
+				return status;
+		}
 		spin_lock_irq(&usbhid->lock);
 		set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
 		spin_unlock_irq(&usbhid->lock);
@@ -1348,6 +1383,11 @@
 		hid_io_error(hid);
 	usbhid_restart_queues(usbhid);
 
+	if (status >= 0 && hid->driver && hid->driver->resume) {
+		int ret = hid->driver->resume(hid);
+		if (ret < 0)
+			status = ret;
+	}
 	dev_dbg(&intf->dev, "resume status %d\n", status);
 	return 0;
 }
@@ -1356,9 +1396,16 @@
 {
 	struct hid_device *hid = usb_get_intfdata(intf);
 	struct usbhid_device *usbhid = hid->driver_data;
+	int status;
 
 	clear_bit(HID_REPORTED_IDLE, &usbhid->iofl);
-	return hid_post_reset(intf);
+	status = hid_post_reset(intf);
+	if (status >= 0 && hid->driver && hid->driver->reset_resume) {
+		int ret = hid->driver->reset_resume(hid);
+		if (ret < 0)
+			status = ret;
+	}
+	return status;
 }
 
 #endif /* CONFIG_PM */
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 1152f9b..5ff8d32 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -33,6 +33,7 @@
 	{ USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
 	{ USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
 	{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
+	{ USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 433602a..c24d2fa 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -267,6 +267,7 @@
 	struct hiddev_list *list;
 	int res, i;
 
+	/* See comment in hiddev_connect() for BKL explanation */
 	lock_kernel();
 	i = iminor(inode) - HIDDEV_MINOR_BASE;
 
@@ -894,8 +895,22 @@
 	hiddev->hid = hid;
 	hiddev->exist = 1;
 
-	/* when lock_kernel() usage is fixed in usb_open(),
-	 * we could also fix it here */
+	/*
+	 * BKL here is used to avoid race after usb_register_dev().
+	 * Once the device node has been created, open() could happen on it.
+	 * The code below will then fail, as hiddev_table hasn't been
+	 * updated.
+	 *
+	 * The obvious fix -- introducing mutex to guard hiddev_table[]
+	 * doesn't work, as usb_open() and usb_register_dev() both take
+	 * minor_rwsem, thus we'll have ABBA deadlock.
+	 *
+	 * Before BKL pushdown, usb_open() had been acquiring it in right
+	 * order, so _open() was safe to use it to protect from this race.
+	 * Now the order is different, but AB-BA deadlock still doesn't occur
+	 * as BKL is dropped on schedule() (i.e. while sleeping on
+	 * minor_rwsem). Fugly.
+	 */
 	lock_kernel();
 	retval = usb_register_dev(usbhid->intf, &hiddev_class);
 	if (retval) {
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index bb14c82..a948605 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -311,6 +311,7 @@
 		goto fail2;
 
 	usb_set_intfdata(iface, kbd);
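+	/* allow this keyboard to be used as a wakeup source */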
+	device_set_wakeup_enable(&dev->dev, 1);
 	return 0;
 
 fail2:	
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index dcdaf8e..2b9a8f5 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -109,13 +109,13 @@
  * returns after the address has been sent
  */
 static int pca_address(struct i2c_algo_pca_data *adap,
-			struct i2c_msg *msg)
+		       struct i2c_msg *msg)
 {
 	int sta = pca_get_con(adap);
 	int addr;
 
-	addr = ( (0x7f & msg->addr) << 1 );
-	if (msg->flags & I2C_M_RD )
+	addr = ((0x7f & msg->addr) << 1);
+	if (msg->flags & I2C_M_RD)
 		addr |= 1;
 	DEB2("=== SLAVE ADDRESS %#04x+%c=%#04x\n",
 	     msg->addr, msg->flags & I2C_M_RD ? 'R' : 'W', addr);
@@ -134,7 +134,7 @@
  * Returns after the byte has been transmitted
  */
 static int pca_tx_byte(struct i2c_algo_pca_data *adap,
-			__u8 b)
+		       __u8 b)
 {
 	int sta = pca_get_con(adap);
 	DEB2("=== WRITE %#04x\n", b);
@@ -164,13 +164,13 @@
  * Returns after next byte has arrived.
  */
 static int pca_rx_ack(struct i2c_algo_pca_data *adap,
-		       int ack)
+		      int ack)
 {
 	int sta = pca_get_con(adap);
 
 	sta &= ~(I2C_PCA_CON_STO|I2C_PCA_CON_STA|I2C_PCA_CON_SI|I2C_PCA_CON_AA);
 
-	if ( ack )
+	if (ack)
 		sta |= I2C_PCA_CON_AA;
 
 	pca_set_con(adap, sta);
@@ -178,12 +178,12 @@
 }
 
 static int pca_xfer(struct i2c_adapter *i2c_adap,
-                    struct i2c_msg *msgs,
-                    int num)
+		    struct i2c_msg *msgs,
+		    int num)
 {
-        struct i2c_algo_pca_data *adap = i2c_adap->algo_data;
-        struct i2c_msg *msg = NULL;
-        int curmsg;
+	struct i2c_algo_pca_data *adap = i2c_adap->algo_data;
+	struct i2c_msg *msg = NULL;
+	int curmsg;
 	int numbytes = 0;
 	int state;
 	int ret;
@@ -202,21 +202,21 @@
 
 	DEB1("{{{ XFER %d messages\n", num);
 
-	if (i2c_debug>=2) {
+	if (i2c_debug >= 2) {
 		for (curmsg = 0; curmsg < num; curmsg++) {
 			int addr, i;
 			msg = &msgs[curmsg];
 
 			addr = (0x7f & msg->addr) ;
 
-			if (msg->flags & I2C_M_RD )
+			if (msg->flags & I2C_M_RD)
 				printk(KERN_INFO "    [%02d] RD %d bytes from %#02x [%#02x, ...]\n",
-				       curmsg, msg->len, addr, (addr<<1) | 1);
+				       curmsg, msg->len, addr, (addr << 1) | 1);
 			else {
 				printk(KERN_INFO "    [%02d] WR %d bytes to %#02x [%#02x%s",
-				       curmsg, msg->len, addr, addr<<1,
+				       curmsg, msg->len, addr, addr << 1,
 				       msg->len == 0 ? "" : ", ");
-				for(i=0; i < msg->len; i++)
+				for (i = 0; i < msg->len; i++)
 					printk("%#04x%s", msg->buf[i], i == msg->len - 1 ? "" : ", ");
 				printk("]\n");
 			}
@@ -305,7 +305,7 @@
 			goto out;
 
 		case 0x58: /* Data byte has been received; NOT ACK has been returned */
-			if ( numbytes == msg->len - 1 ) {
+			if (numbytes == msg->len - 1) {
 				pca_rx_byte(adap, &msg->buf[numbytes], 0);
 				curmsg++; numbytes = 0;
 				if (curmsg == num)
@@ -352,7 +352,7 @@
 
 static u32 pca_func(struct i2c_adapter *adap)
 {
-        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
 static const struct i2c_algorithm pca_algo = {
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index bd8f1e4..906a3ca5 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -60,7 +60,7 @@
 #include <linux/i2c.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 
 /* ALI1535 SMBus address offsets */
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 659f63f..b14f6d6 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -67,7 +67,7 @@
 #include <linux/i2c.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 /* ALI15X3 SMBus address offsets */
 #define SMBHSTSTS	(0 + ali15x3_smba)
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index c5a9fa4..03bcd07 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -43,7 +43,7 @@
 #include <linux/i2c.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 /* AMD756 SMBus address offsets */
 #define SMB_ADDR_OFFSET		0xE0
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 2fbef27..af1e5e2 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -18,7 +18,7 @@
 #include <linux/delay.h>
 #include <linux/acpi.h>
 #include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR ("Vojtech Pavlik <vojtech@suse.cz>");
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 06e1ecb..305c075 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -23,8 +23,7 @@
 #include <linux/init.h>
 #include <linux/clk.h>
 #include <linux/platform_device.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <mach/at91_twi.h>
 #include <mach/board.h>
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 6122556..e5b1a3b 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -37,8 +37,8 @@
 #include <linux/isa.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-pcf.h>
+#include <linux/io.h>
 
-#include <asm/io.h>
 #include <asm/irq.h>
 
 #include "../algos/i2c-algo-pcf.h"
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index c21077d..d9aa9a6 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -211,7 +211,7 @@
 
 	return ret;
 }
-module_init(i2c_gpio_init);
+subsys_initcall(i2c_gpio_init);
 
 static void __exit i2c_gpio_exit(void)
 {
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index c767295..9ff1695 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -28,7 +28,7 @@
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/hydra.h>
 
 
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 299b918..f4b21f2 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -138,6 +138,17 @@
 #define FEATURE_I2C_BLOCK_READ	(1 << 3)
 static unsigned int i801_features;
 
+static const char *i801_feature_names[] = {
+	"SMBus PEC",
+	"Block buffer",
+	"Block process call",
+	"I2C block read",
+};
+
+static unsigned int disable_features;
+module_param(disable_features, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_features, "Disable selected driver features");
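+/* bit n of disable_features corresponds to i801_feature_names[n] above */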
+
 /* Make sure the SMBus host is ready to start transmitting.
    Return 0 if it is, -EBUSY if it is not. */
 static int i801_check_pre(void)
@@ -341,9 +352,8 @@
 		do {
 			msleep(1);
 			status = inb_p(SMBHSTSTS);
-		}
-		while ((!(status & SMBHSTSTS_BYTE_DONE))
-		       && (timeout++ < MAX_TIMEOUT));
+		} while ((!(status & SMBHSTSTS_BYTE_DONE))
+			 && (timeout++ < MAX_TIMEOUT));
 
 		result = i801_check_post(status, timeout > MAX_TIMEOUT);
 		if (result < 0)
@@ -440,9 +450,9 @@
 }
 
 /* Return negative errno on error. */
-static s32 i801_access(struct i2c_adapter * adap, u16 addr,
+static s32 i801_access(struct i2c_adapter *adap, u16 addr,
 		       unsigned short flags, char read_write, u8 command,
-		       int size, union i2c_smbus_data * data)
+		       int size, union i2c_smbus_data *data)
 {
 	int hwpec;
 	int block = 0;
@@ -511,7 +521,7 @@
 	else
 		outb_p(inb_p(SMBAUXCTL) & (~SMBAUXCTL_CRC), SMBAUXCTL);
 
-	if(block)
+	if (block)
 		ret = i801_block_transaction(data, read_write, size, hwpec);
 	else
 		ret = i801_transaction(xact | ENABLE_INT9);
@@ -523,9 +533,9 @@
 		outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
 		       SMBAUXCTL);
 
-	if(block)
+	if (block)
 		return ret;
-	if(ret)
+	if (ret)
 		return ret;
 	if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK))
 		return 0;
@@ -585,7 +595,7 @@
 	{ 0, }
 };
 
-MODULE_DEVICE_TABLE (pci, i801_ids);
+MODULE_DEVICE_TABLE(pci, i801_ids);
 
 #if defined CONFIG_INPUT_APANEL || defined CONFIG_INPUT_APANEL_MODULE
 static unsigned char apanel_addr;
@@ -689,10 +699,11 @@
 }
 #endif
 
-static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int __devinit i801_probe(struct pci_dev *dev,
+				const struct pci_device_id *id)
 {
 	unsigned char temp;
-	int err;
+	int err, i;
 #if defined CONFIG_SENSORS_FSCHMD || defined CONFIG_SENSORS_FSCHMD_MODULE
 	const char *vendor;
 #endif
@@ -700,26 +711,28 @@
 	I801_dev = dev;
 	i801_features = 0;
 	switch (dev->device) {
-	case PCI_DEVICE_ID_INTEL_82801EB_3:
-	case PCI_DEVICE_ID_INTEL_ESB_4:
-	case PCI_DEVICE_ID_INTEL_ICH6_16:
-	case PCI_DEVICE_ID_INTEL_ICH7_17:
-	case PCI_DEVICE_ID_INTEL_ESB2_17:
-	case PCI_DEVICE_ID_INTEL_ICH8_5:
-	case PCI_DEVICE_ID_INTEL_ICH9_6:
-	case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
-	case PCI_DEVICE_ID_INTEL_ICH10_4:
-	case PCI_DEVICE_ID_INTEL_ICH10_5:
-	case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
-	case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
+	default:
 		i801_features |= FEATURE_I2C_BLOCK_READ;
 		/* fall through */
 	case PCI_DEVICE_ID_INTEL_82801DB_3:
 		i801_features |= FEATURE_SMBUS_PEC;
 		i801_features |= FEATURE_BLOCK_BUFFER;
+		/* fall through */
+	case PCI_DEVICE_ID_INTEL_82801CA_3:
+	case PCI_DEVICE_ID_INTEL_82801BA_2:
+	case PCI_DEVICE_ID_INTEL_82801AB_3:
+	case PCI_DEVICE_ID_INTEL_82801AA_3:
 		break;
 	}
 
+	/* Disable features on user request */
+	for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
+		if (i801_features & disable_features & (1 << i))
+			dev_notice(&dev->dev, "%s disabled by user\n",
+				   i801_feature_names[i]);
+	}
+	i801_features &= ~disable_features;
+
 	err = pci_enable_device(dev);
 	if (err) {
 		dev_err(&dev->dev, "Failed to enable SMBus PCI device (%d)\n",
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 2bef534..f8ccc0f 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -39,7 +39,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <asm/irq.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/i2c.h>
 #include <linux/i2c-id.h>
 #include <linux/of_platform.h>
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 5901707..112c61f 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -38,8 +38,7 @@
 #include <linux/errno.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include "i2c-iop3xx.h"
 
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 3623a44..1624206 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -17,8 +17,7 @@
 #include <linux/interrupt.h>
 #include <linux/mv643xx_i2c.h>
 #include <linux/platform_device.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
 
 /* Register defines */
 #define	MV64XXX_I2C_REG_SLAVE_ADDR			0x00
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 4a48dd4..a605a50 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -57,7 +57,7 @@
 #include <linux/dmi.h>
 #include <linux/acpi.h>
 #include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR ("Hans-Frieder Vogt <hfvogt@gmx.net>");
@@ -404,10 +404,9 @@
 
 	/* SMBus adapter 1 */
 	res1 = nforce2_probe_smb(dev, 4, NFORCE_PCI_SMB1, &smbuses[0], "SMB1");
-	if (res1 < 0) {
-		dev_err(&dev->dev, "Error probing SMB1.\n");
+	if (res1 < 0)
 		smbuses[0].base = 0;	/* to have a check value */
-	}
+
 	/* SMBus adapter 2 */
 	if (dmi_check_system(nforce2_dmi_blacklist2)) {
 		dev_err(&dev->dev, "Disabling SMB2 for safety reasons.\n");
@@ -416,11 +415,10 @@
 	} else {
 		res2 = nforce2_probe_smb(dev, 5, NFORCE_PCI_SMB2, &smbuses[1],
 					 "SMB2");
-		if (res2 < 0) {
-			dev_err(&dev->dev, "Error probing SMB2.\n");
+		if (res2 < 0)
 			smbuses[1].base = 0;	/* to have a check value */
-		}
 	}
+
 	if ((res1 < 0) && (res2 < 0)) {
 		/* we did not find even one of the SMBuses, so we give up */
 		kfree(smbuses);
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index b4ed4ca..0070371 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -19,7 +19,7 @@
 #include <linux/wait.h>
 #include <linux/i2c-ocores.h>
 #include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 struct ocores_i2c {
 	void __iomem *base;
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index 5f41ec0..fc5fbd1 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -33,7 +33,7 @@
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/i2c-smbus.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include "i2c-parport.h"
 
 #define DEFAULT_BASE 0x378
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 846583e..0eb1515 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -137,7 +137,7 @@
    copied. The attaching code will set getscl to NULL for adapters that
    cannot read SCL back, and will also make the data field point to
    the parallel port structure. */
-static struct i2c_algo_bit_data parport_algo_data = {
+static const struct i2c_algo_bit_data parport_algo_data = {
 	.setsda		= parport_setsda,
 	.setscl		= parport_setscl,
 	.getsda		= parport_getsda,
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index d3d4a4b..4174101 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -25,7 +25,7 @@
 #include <linux/i2c.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 static struct pci_driver pasemi_smb_driver;
 
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index f7346a9..bbd7760 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -30,8 +30,8 @@
 #include <linux/isa.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-pca.h>
+#include <linux/io.h>
 
-#include <asm/io.h>
 #include <asm/irq.h>
 
 #define DRIVER "i2c-pca-isa"
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 5b2213d..ef5c784 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -23,9 +23,9 @@
 #include <linux/i2c-algo-pca.h>
 #include <linux/i2c-pca-platform.h>
 #include <linux/gpio.h>
+#include <linux/io.h>
 
 #include <asm/irq.h>
-#include <asm/io.h>
 
 struct i2c_pca_pf_data {
 	void __iomem			*reg_base;
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index ee9da6f..6d14ac2 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -39,7 +39,7 @@
 #include <linux/init.h>
 #include <linux/dmi.h>
 #include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 
 /* PIIX4 SMBus address offsets */
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 7b57d5f..dfa7ae9 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -33,7 +33,7 @@
 #include <linux/completion.h>
 #include <linux/mutex.h>
 #include <linux/delay.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #define DRV_NAME	"pmcmsptwi"
 
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index fbde6f6..020ff23 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -34,9 +34,9 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 
 #include <asm/irq.h>
-#include <asm/io.h>
 #include <plat/i2c.h>
 
 /*
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index ec3256c..72902e0 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -35,9 +35,9 @@
 #include <linux/clk.h>
 #include <linux/cpufreq.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 
 #include <asm/irq.h>
-#include <asm/io.h>
 
 #include <plat/regs-iic.h>
 #include <plat/iic.h>
diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c
index c91359f..cadc021 100644
--- a/drivers/i2c/busses/i2c-s6000.c
+++ b/drivers/i2c/busses/i2c-s6000.c
@@ -36,8 +36,8 @@
 #include <linux/completion.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/io.h>
 
-#include <asm/io.h>
 #include "i2c-s6000.h"
 
 #define DRV_NAME "i2c-s6000"
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index b9680f5..4f93da3 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -16,10 +16,10 @@
 #include <linux/ioport.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 
 #include <asm/clock.h>
 #include <asm/i2c-sh7760.h>
-#include <asm/io.h>
 
 /* register offsets */
 #define I2CSCR		0x0		/* slave ctrl		*/
diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c
index 98b1ec4..3d76a18 100644
--- a/drivers/i2c/busses/i2c-sibyte.c
+++ b/drivers/i2c/busses/i2c-sibyte.c
@@ -22,7 +22,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/i2c.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/sibyte/sb1250_regs.h>
 #include <asm/sibyte/sb1250_smbus.h>
 
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 78b0610..2fc08fb 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -24,12 +24,11 @@
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 
-#include <asm/io.h>
-
 struct simtec_i2c_data {
 	struct resource		*ioarea;
 	void __iomem		*reg;
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 55a7137..4375866 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -61,7 +61,7 @@
 #include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 static int blacklist[] = {
 	PCI_DEVICE_ID_SI_540,
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 2309c7f..e6f539e 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -53,7 +53,7 @@
 #include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 /* SIS630 SMBus registers */
 #define SMB_STS			0x80	/* status */
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index d43d8f8..86837f0 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -38,7 +38,7 @@
 #include <linux/i2c.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 /* base address register in PCI config space */
 #define SIS96x_BAR 0x04
diff --git a/drivers/i2c/busses/i2c-stub.c b/drivers/i2c/busses/i2c-stub.c
index 0c770ea..b1b34479 100644
--- a/drivers/i2c/busses/i2c-stub.c
+++ b/drivers/i2c/busses/i2c-stub.c
@@ -29,13 +29,16 @@
 #include <linux/i2c.h>
 
 #define MAX_CHIPS 10
+#define STUB_FUNC (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | \
+		   I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | \
+		   I2C_FUNC_SMBUS_I2C_BLOCK)
 
 static unsigned short chip_addr[MAX_CHIPS];
 module_param_array(chip_addr, ushort, NULL, S_IRUGO);
 MODULE_PARM_DESC(chip_addr,
 		 "Chip addresses (up to 10, between 0x03 and 0x77)");
 
-static unsigned long functionality = ~0UL;
+static unsigned long functionality = STUB_FUNC;
 module_param(functionality, ulong, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(functionality, "Override functionality bitfield");
 
@@ -156,9 +159,7 @@
 
 static u32 stub_func(struct i2c_adapter *adapter)
 {
-	return (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
-		I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
-		I2C_FUNC_SMBUS_I2C_BLOCK) & functionality;
+	return STUB_FUNC & functionality;
 }
 
 static const struct i2c_algorithm smbus_algorithm = {
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index 5c47383..6055601 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -15,8 +15,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
 
 #define I2C_CONTROL	0x00
 #define I2C_CONTROLS	0x00
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index de78283..7799fe5 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -25,7 +25,7 @@
 #include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 /* Power management registers */
 #define PM_CFG_REVID	0x08	/* silicon revision code */
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index d57292e..4c6fff5 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -51,7 +51,7 @@
 #include <linux/i2c.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 static struct pci_dev *vt596_pdev;
 
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 684395b..4cb4bb0 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -32,7 +32,7 @@
 #include <linux/delay.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/scx200.h>
 
@@ -552,7 +552,7 @@
  * the name and the BAR where the I/O address resource is located.  ISA
  * devices are flagged with a bar value of -1 */
 
-static struct pci_device_id scx200_pci[] = {
+static const struct pci_device_id scx200_pci[] __initconst = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE),
 	  .driver_data = 0 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE),
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index 42df0ec..7ee0d50 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -27,7 +27,7 @@
 #include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/scx200_gpio.h>
 
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 7c469a6..db3c9f3 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1221,10 +1221,10 @@
  *
  * Returns negative errno, or else the number of bytes written.
  */
-int i2c_master_send(struct i2c_client *client,const char *buf ,int count)
+int i2c_master_send(struct i2c_client *client, const char *buf, int count)
 {
 	int ret;
-	struct i2c_adapter *adap=client->adapter;
+	struct i2c_adapter *adap = client->adapter;
 	struct i2c_msg msg;
 
 	msg.addr = client->addr;
@@ -1248,9 +1248,9 @@
  *
  * Returns negative errno, or else the number of bytes read.
  */
-int i2c_master_recv(struct i2c_client *client, char *buf ,int count)
+int i2c_master_recv(struct i2c_client *client, char *buf, int count)
 {
-	struct i2c_adapter *adap=client->adapter;
+	struct i2c_adapter *adap = client->adapter;
 	struct i2c_msg msg;
 	int ret;
 
@@ -1452,7 +1452,7 @@
 }
 EXPORT_SYMBOL_GPL(i2c_new_probed_device);
 
-struct i2c_adapter* i2c_get_adapter(int id)
+struct i2c_adapter *i2c_get_adapter(int id)
 {
 	struct i2c_adapter *adapter;
 
@@ -1479,7 +1479,7 @@
 {
 	int i;
 
-	for(i = 0; i < 8; i++) {
+	for (i = 0; i < 8; i++) {
 		if (data & 0x8000)
 			data = data ^ POLY;
 		data = data << 1;
@@ -1492,7 +1492,7 @@
 {
 	int i;
 
-	for(i = 0; i < count; i++)
+	for (i = 0; i < count; i++)
 		crc = crc8((crc ^ p[i]) << 8);
 	return crc;
 }
@@ -1562,7 +1562,7 @@
  */
 s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value)
 {
-	return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
+	return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
 	                      I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL);
 }
 EXPORT_SYMBOL(i2c_smbus_write_byte);
@@ -1600,9 +1600,9 @@
 {
 	union i2c_smbus_data data;
 	data.byte = value;
-	return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
-	                      I2C_SMBUS_WRITE,command,
-	                      I2C_SMBUS_BYTE_DATA,&data);
+	return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+			      I2C_SMBUS_WRITE, command,
+			      I2C_SMBUS_BYTE_DATA, &data);
 }
 EXPORT_SYMBOL(i2c_smbus_write_byte_data);
 
@@ -1639,9 +1639,9 @@
 {
 	union i2c_smbus_data data;
 	data.word = value;
-	return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
-	                      I2C_SMBUS_WRITE,command,
-	                      I2C_SMBUS_WORD_DATA,&data);
+	return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+			      I2C_SMBUS_WRITE, command,
+			      I2C_SMBUS_WORD_DATA, &data);
 }
 EXPORT_SYMBOL(i2c_smbus_write_word_data);
 
@@ -1718,9 +1718,9 @@
 		length = I2C_SMBUS_BLOCK_MAX;
 	data.block[0] = length;
 	memcpy(&data.block[1], values, length);
-	return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
-			      I2C_SMBUS_WRITE,command,
-			      I2C_SMBUS_BLOCK_DATA,&data);
+	return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+			      I2C_SMBUS_WRITE, command,
+			      I2C_SMBUS_BLOCK_DATA, &data);
 }
 EXPORT_SYMBOL(i2c_smbus_write_block_data);
 
@@ -1762,10 +1762,10 @@
 
 /* Simulate a SMBus command using the i2c protocol
    No checking of parameters is done!  */
-static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
-                                   unsigned short flags,
-                                   char read_write, u8 command, int size,
-                                   union i2c_smbus_data * data)
+static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
+				   unsigned short flags,
+				   char read_write, u8 command, int size,
+				   union i2c_smbus_data *data)
 {
 	/* So we need to generate a series of msgs. In the case of writing, we
 	  need to use only one message; when reading, we need two. We initialize
@@ -1773,7 +1773,7 @@
 	  simpler. */
 	unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3];
 	unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2];
-	int num = read_write == I2C_SMBUS_READ?2:1;
+	int num = read_write == I2C_SMBUS_READ ? 2 : 1;
 	struct i2c_msg msg[2] = { { addr, flags, 1, msgbuf0 },
 	                          { addr, flags | I2C_M_RD, 0, msgbuf1 }
 	                        };
@@ -1782,7 +1782,7 @@
 	int status;
 
 	msgbuf0[0] = command;
-	switch(size) {
+	switch (size) {
 	case I2C_SMBUS_QUICK:
 		msg[0].len = 0;
 		/* Special case: The read/write field is used as data */
@@ -1809,7 +1809,7 @@
 		if (read_write == I2C_SMBUS_READ)
 			msg[1].len = 2;
 		else {
-			msg[0].len=3;
+			msg[0].len = 3;
 			msgbuf0[1] = data->word & 0xff;
 			msgbuf0[2] = data->word >> 8;
 		}
@@ -1902,26 +1902,26 @@
 	}
 
 	if (read_write == I2C_SMBUS_READ)
-		switch(size) {
-			case I2C_SMBUS_BYTE:
-				data->byte = msgbuf0[0];
-				break;
-			case I2C_SMBUS_BYTE_DATA:
-				data->byte = msgbuf1[0];
-				break;
-			case I2C_SMBUS_WORD_DATA:
-			case I2C_SMBUS_PROC_CALL:
-				data->word = msgbuf1[0] | (msgbuf1[1] << 8);
-				break;
-			case I2C_SMBUS_I2C_BLOCK_DATA:
-				for (i = 0; i < data->block[0]; i++)
-					data->block[i+1] = msgbuf1[i];
-				break;
-			case I2C_SMBUS_BLOCK_DATA:
-			case I2C_SMBUS_BLOCK_PROC_CALL:
-				for (i = 0; i < msgbuf1[0] + 1; i++)
-					data->block[i] = msgbuf1[i];
-				break;
+		switch (size) {
+		case I2C_SMBUS_BYTE:
+			data->byte = msgbuf0[0];
+			break;
+		case I2C_SMBUS_BYTE_DATA:
+			data->byte = msgbuf1[0];
+			break;
+		case I2C_SMBUS_WORD_DATA:
+		case I2C_SMBUS_PROC_CALL:
+			data->word = msgbuf1[0] | (msgbuf1[1] << 8);
+			break;
+		case I2C_SMBUS_I2C_BLOCK_DATA:
+			for (i = 0; i < data->block[0]; i++)
+				data->block[i+1] = msgbuf1[i];
+			break;
+		case I2C_SMBUS_BLOCK_DATA:
+		case I2C_SMBUS_BLOCK_PROC_CALL:
+			for (i = 0; i < msgbuf1[0] + 1; i++)
+				data->block[i] = msgbuf1[i];
+			break;
 		}
 	return 0;
 }
@@ -1966,7 +1966,7 @@
 		}
 		rt_mutex_unlock(&adapter->bus_lock);
 	} else
-		res = i2c_smbus_xfer_emulated(adapter,addr,flags,read_write,
+		res = i2c_smbus_xfer_emulated(adapter, addr, flags, read_write,
 					      command, protocol, data);
 
 	return res;
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index f4110aa..e0694e4 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -35,7 +35,7 @@
 #include <linux/i2c.h>
 #include <linux/i2c-dev.h>
 #include <linux/jiffies.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 static struct i2c_driver i2cdev_driver;
 
@@ -132,45 +132,45 @@
  * needed by those system calls and by this SMBus interface.
  */
 
-static ssize_t i2cdev_read (struct file *file, char __user *buf, size_t count,
-                            loff_t *offset)
+static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
+		loff_t *offset)
 {
 	char *tmp;
 	int ret;
 
-	struct i2c_client *client = (struct i2c_client *)file->private_data;
+	struct i2c_client *client = file->private_data;
 
 	if (count > 8192)
 		count = 8192;
 
-	tmp = kmalloc(count,GFP_KERNEL);
-	if (tmp==NULL)
+	tmp = kmalloc(count, GFP_KERNEL);
+	if (tmp == NULL)
 		return -ENOMEM;
 
 	pr_debug("i2c-dev: i2c-%d reading %zu bytes.\n",
 		iminor(file->f_path.dentry->d_inode), count);
 
-	ret = i2c_master_recv(client,tmp,count);
+	ret = i2c_master_recv(client, tmp, count);
 	if (ret >= 0)
-		ret = copy_to_user(buf,tmp,count)?-EFAULT:ret;
+		ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret;
 	kfree(tmp);
 	return ret;
 }
 
-static ssize_t i2cdev_write (struct file *file, const char __user *buf, size_t count,
-                             loff_t *offset)
+static ssize_t i2cdev_write(struct file *file, const char __user *buf,
+		size_t count, loff_t *offset)
 {
 	int ret;
 	char *tmp;
-	struct i2c_client *client = (struct i2c_client *)file->private_data;
+	struct i2c_client *client = file->private_data;
 
 	if (count > 8192)
 		count = 8192;
 
-	tmp = kmalloc(count,GFP_KERNEL);
-	if (tmp==NULL)
+	tmp = kmalloc(count, GFP_KERNEL);
+	if (tmp == NULL)
 		return -ENOMEM;
-	if (copy_from_user(tmp,buf,count)) {
+	if (copy_from_user(tmp, buf, count)) {
 		kfree(tmp);
 		return -EFAULT;
 	}
@@ -178,7 +178,7 @@
 	pr_debug("i2c-dev: i2c-%d writing %zu bytes.\n",
 		iminor(file->f_path.dentry->d_inode), count);
 
-	ret = i2c_master_send(client,tmp,count);
+	ret = i2c_master_send(client, tmp, count);
 	kfree(tmp);
 	return ret;
 }
@@ -369,13 +369,13 @@
 
 static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-	struct i2c_client *client = (struct i2c_client *)file->private_data;
+	struct i2c_client *client = file->private_data;
 	unsigned long funcs;
 
 	dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n",
 		cmd, arg);
 
-	switch ( cmd ) {
+	switch (cmd) {
 	case I2C_SLAVE:
 	case I2C_SLAVE_FORCE:
 		/* NOTE:  devices set up to work with "new style" drivers
@@ -601,7 +601,7 @@
 {
 	i2c_del_driver(&i2cdev_driver);
 	class_destroy(i2c_dev_class);
-	unregister_chrdev(I2C_MAJOR,"i2c");
+	unregister_chrdev(I2C_MAJOR, "i2c");
 }
 
 MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 3b128dc..33d6503 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -407,32 +407,24 @@
 	return 0;
 }
 
-static u64 ide_disk_set_capacity(ide_drive_t *drive, u64 capacity)
+static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
 {
-	u64 set = min(capacity, drive->probed_capacity);
 	u16 *id = drive->id;
 	int lba48 = ata_id_lba48_enabled(id);
 
 	if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 ||
 	    ata_id_hpa_enabled(id) == 0)
-		goto out;
+		return;
 
 	/*
 	 * according to the spec the SET MAX ADDRESS command shall be
 	 * immediately preceded by a READ NATIVE MAX ADDRESS command
 	 */
-	capacity = ide_disk_hpa_get_native_capacity(drive, lba48);
-	if (capacity == 0)
-		goto out;
+	if (!ide_disk_hpa_get_native_capacity(drive, lba48))
+		return;
 
-	set = ide_disk_hpa_set_capacity(drive, set, lba48);
-	if (set) {
-		/* needed for ->resume to disable HPA */
-		drive->dev_flags |= IDE_DFLAG_NOHPA;
-		return set;
-	}
-out:
-	return drive->capacity64;
+	if (ide_disk_hpa_set_capacity(drive, drive->probed_capacity, lba48))
+		drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */
 }
 
 static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
@@ -783,13 +775,13 @@
 }
 
 const struct ide_disk_ops ide_ata_disk_ops = {
-	.check		= ide_disk_check,
-	.set_capacity	= ide_disk_set_capacity,
-	.get_capacity	= ide_disk_get_capacity,
-	.setup		= ide_disk_setup,
-	.flush		= ide_disk_flush,
-	.init_media	= ide_disk_init_media,
-	.set_doorlock	= ide_disk_set_doorlock,
-	.do_request	= ide_do_rw_disk,
-	.ioctl		= ide_disk_ioctl,
+	.check			= ide_disk_check,
+	.unlock_native_capacity	= ide_disk_unlock_native_capacity,
+	.get_capacity		= ide_disk_get_capacity,
+	.setup			= ide_disk_setup,
+	.flush			= ide_disk_flush,
+	.init_media		= ide_disk_init_media,
+	.set_doorlock		= ide_disk_set_doorlock,
+	.do_request		= ide_do_rw_disk,
+	.ioctl			= ide_disk_ioctl,
 };
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index c32d83996..c102d23 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -288,17 +288,14 @@
 	return ret;
 }
 
-static unsigned long long ide_gd_set_capacity(struct gendisk *disk,
-					      unsigned long long capacity)
+static void ide_gd_unlock_native_capacity(struct gendisk *disk)
 {
 	struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
 	ide_drive_t *drive = idkp->drive;
 	const struct ide_disk_ops *disk_ops = drive->disk_ops;
 
-	if (disk_ops->set_capacity)
-		return disk_ops->set_capacity(drive, capacity);
-
-	return drive->capacity64;
+	if (disk_ops->unlock_native_capacity)
+		disk_ops->unlock_native_capacity(drive);
 }
 
 static int ide_gd_revalidate_disk(struct gendisk *disk)
@@ -329,7 +326,7 @@
 	.locked_ioctl		= ide_gd_ioctl,
 	.getgeo			= ide_gd_getgeo,
 	.media_changed		= ide_gd_media_changed,
-	.set_capacity		= ide_gd_set_capacity,
+	.unlock_native_capacity	= ide_gd_unlock_native_capacity,
 	.revalidate_disk	= ide_gd_revalidate_disk
 };
 
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 7e319d6..f34f1db 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -209,4 +209,20 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called altera_ps2.
 
+config SERIO_AMS_DELTA
+	tristate "Amstrad Delta (E3) mailboard support"
+	depends on MACH_AMS_DELTA
+	default y
+	select AMS_DELTA_FIQ
+	---help---
+	  Say Y here if you have an E3 and want to use its mailboard,
+	  or any standard AT keyboard connected to the mailboard port.
+
+	  When used for the E3 mailboard, a non-standard key table
+	  must be loaded from userspace, possibly using the keymap
+	  helper utility provided by udev extras.
+
+	  To compile this driver as a module, choose M here;
+	  the module will be called ams_delta_serio.
+
 endif
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index bf945f7..84c80bf 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -21,5 +21,6 @@
 obj-$(CONFIG_SERIO_MACEPS2)	+= maceps2.o
 obj-$(CONFIG_SERIO_LIBPS2)	+= libps2.o
 obj-$(CONFIG_SERIO_RAW)		+= serio_raw.o
+obj-$(CONFIG_SERIO_AMS_DELTA)	+= ams_delta_serio.o
 obj-$(CONFIG_SERIO_XILINX_XPS_PS2)	+= xilinx_ps2.o
 obj-$(CONFIG_SERIO_ALTERA_PS2)	+= altera_ps2.o
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
new file mode 100644
index 0000000..8f1770e
--- /dev/null
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -0,0 +1,177 @@
+/*
+ *  Amstrad E3 (Delta) keyboard port driver
+ *
+ *  Copyright (c) 2006 Matt Callow
+ *  Copyright (c) 2010 Janusz Krzysztofik
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Thanks to Cliff Lawson for his help
+ *
+ * The Amstrad Delta keyboard (aka mailboard) uses normal PC-AT style serial
+ * transmission.  The keyboard port is formed of two GPIO lines, for clock
+ * and data.  Due to strict timing requirements of the interface,
+ * the serial data stream is read and processed by a FIQ handler.
+ * The resulting words are fetched by this driver from a circular buffer.
+ *
+ * Standard AT keyboard driver (atkbd) is used for handling the keyboard data.
+ * However, when used with the E3 mailboard that produces non-standard
+ * scancodes, a custom key table must be prepared and loaded from userspace.
+ */
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+
+#include <asm/mach-types.h>
+#include <plat/board-ams-delta.h>
+
+#include <mach/ams-delta-fiq.h>
+
+MODULE_AUTHOR("Matt Callow");
+MODULE_DESCRIPTION("AMS Delta (E3) keyboard port driver");
+MODULE_LICENSE("GPL");
+
+static struct serio *ams_delta_serio;
+
+static int check_data(int data)
+{
+	int i, parity = 0;
+
+	/* check valid stop bit */
+	if (!(data & 0x400)) {
+		dev_warn(&ams_delta_serio->dev,
+				"invalid stop bit, data=0x%X\n",
+				data);
+		return SERIO_FRAME;
+	}
+	/* calculate the parity */
+	for (i = 1; i < 10; i++) {
+		if (data & (1 << i))
+			parity++;
+	}
+	/* it should be odd */
+	if (!(parity & 0x01)) {
+		dev_warn(&ams_delta_serio->dev,
+				"parity check failed, data=0x%X parity=0x%X\n",
+				data, parity);
+		return SERIO_PARITY;
+	}
+	return 0;
+}
+
+static irqreturn_t ams_delta_serio_interrupt(int irq, void *dev_id)
+{
+	int *circ_buff = &fiq_buffer[FIQ_CIRC_BUFF];
+	int data, dfl;
+	u8 scancode;
+
+	fiq_buffer[FIQ_IRQ_PEND] = 0;
+
+	/*
+	 * Read data from the circular buffer, check it
+	 * and then pass it on to the serio layer
+	 */
+	while (fiq_buffer[FIQ_KEYS_CNT] > 0) {
+
+		data = circ_buff[fiq_buffer[FIQ_HEAD_OFFSET]++];
+		fiq_buffer[FIQ_KEYS_CNT]--;
+		if (fiq_buffer[FIQ_HEAD_OFFSET] == fiq_buffer[FIQ_BUF_LEN])
+			fiq_buffer[FIQ_HEAD_OFFSET] = 0;
+
+		dfl = check_data(data);
+		scancode = (u8) (data >> 1) & 0xFF;
+		serio_interrupt(ams_delta_serio, scancode, dfl);
+	}
+	return IRQ_HANDLED;
+}
+
+static int ams_delta_serio_open(struct serio *serio)
+{
+	/* enable keyboard */
+	ams_delta_latch2_write(AMD_DELTA_LATCH2_KEYBRD_PWR,
+			AMD_DELTA_LATCH2_KEYBRD_PWR);
+
+	return 0;
+}
+
+static void ams_delta_serio_close(struct serio *serio)
+{
+	/* disable keyboard */
+	ams_delta_latch2_write(AMD_DELTA_LATCH2_KEYBRD_PWR, 0);
+}
+
+static int __init ams_delta_serio_init(void)
+{
+	int err;
+
+	if (!machine_is_ams_delta())
+		return -ENODEV;
+
+	ams_delta_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+	if (!ams_delta_serio)
+		return -ENOMEM;
+
+	ams_delta_serio->id.type = SERIO_8042;
+	ams_delta_serio->open = ams_delta_serio_open;
+	ams_delta_serio->close = ams_delta_serio_close;
+	strlcpy(ams_delta_serio->name, "AMS DELTA keyboard adapter",
+			sizeof(ams_delta_serio->name));
+	strlcpy(ams_delta_serio->phys, "GPIO/serio0",
+			sizeof(ams_delta_serio->phys));
+
+	err = gpio_request(AMS_DELTA_GPIO_PIN_KEYBRD_DATA, "serio-data");
+	if (err) {
+		pr_err("ams_delta_serio: couldn't request gpio pin for data\n");
+		goto serio;
+	}
+	gpio_direction_input(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+
+	err = gpio_request(AMS_DELTA_GPIO_PIN_KEYBRD_CLK, "serio-clock");
+	if (err) {
+		pr_err("ams_delta_serio: couldn't request gpio pin for clock\n");
+		goto gpio_data;
+	}
+	gpio_direction_input(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
+
+	err = request_irq(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
+			ams_delta_serio_interrupt, IRQ_TYPE_EDGE_RISING,
+			"ams-delta-serio", 0);
+	if (err < 0) {
+		pr_err("ams_delta_serio: couldn't request gpio interrupt %d\n",
+				gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
+		goto gpio_clk;
+	}
+	/*
+	 * Since GPIO register handling for keyboard clock pin is performed
+	 * at FIQ level, switch back from edge to simple interrupt handler
+	 * to avoid bad interaction.
+	 */
+	set_irq_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
+			handle_simple_irq);
+
+	serio_register_port(ams_delta_serio);
+	dev_info(&ams_delta_serio->dev, "%s\n", ams_delta_serio->name);
+
+	return 0;
+gpio_clk:
+	gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
+gpio_data:
+	gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+serio:
+	kfree(ams_delta_serio);
+	return err;
+}
+module_init(ams_delta_serio_init);
+
+static void __exit ams_delta_serio_exit(void)
+{
+	serio_unregister_port(ams_delta_serio);
+	free_irq(OMAP_GPIO_IRQ(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), 0);
+	gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
+	gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+	kfree(ams_delta_serio);
+}
+module_exit(ams_delta_serio_exit);
diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
index 79119f5..bd6da7a 100644
--- a/drivers/macintosh/macio-adb.c
+++ b/drivers/macintosh/macio-adb.c
@@ -155,6 +155,7 @@
 	while ((in_8(&adb->ctrl.r) & ADB_RST) != 0) {
 		if (--timeout == 0) {
 			out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) & ~ADB_RST);
+			spin_unlock_irqrestore(&macio_lock, flags);
 			return -1;
 		}
 	}
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 888448c..c9da5c4 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1183,8 +1183,10 @@
 		return -EOVERFLOW;
 	spin_lock_irqsave(&pp->lock, flags);
 	if (pp->cmd.status == 1) {
-		if (file->f_flags & O_NONBLOCK)
+		if (file->f_flags & O_NONBLOCK) {
+			spin_unlock_irqrestore(&pp->lock, flags);
 			return -EAGAIN;
+		}
 		add_wait_queue(&pp->wait, &wait);
 		for (;;) {
 			set_current_state(TASK_INTERRUPTIBLE);
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index c42eeb4..16d82f1 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -182,6 +182,7 @@
 
 	thermostat = NULL;
 
+	i2c_set_clientdata(client, NULL);
 	kfree(th);
 
 	return 0;
@@ -399,6 +400,7 @@
 	rc = read_reg(th, CONFIG_REG);
 	if (rc < 0) {
 		dev_err(&client->dev, "Thermostat failed to read config!\n");
+		i2c_set_clientdata(client, NULL);
 		kfree(th);
 		return -ENODEV;
 	}
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index 129cda7..749d174 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -757,10 +757,8 @@
 		wf_put_control(cpufreq_clamp);
 
 	/* Destroy control loops state structures */
-	if (wf_smu_sys_fans)
-		kfree(wf_smu_sys_fans);
-	if (wf_smu_cpu_fans)
-		kfree(wf_smu_cpu_fans);
+	kfree(wf_smu_sys_fans);
+	kfree(wf_smu_cpu_fans);
 
 	return 0;
 }
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c
index bea9916..3442732 100644
--- a/drivers/macintosh/windfarm_pm91.c
+++ b/drivers/macintosh/windfarm_pm91.c
@@ -687,12 +687,9 @@
 		wf_put_control(cpufreq_clamp);
 
 	/* Destroy control loops state structures */
-	if (wf_smu_slots_fans)
-		kfree(wf_smu_cpu_fans);
-	if (wf_smu_drive_fans)
-		kfree(wf_smu_cpu_fans);
-	if (wf_smu_cpu_fans)
-		kfree(wf_smu_cpu_fans);
+	kfree(wf_smu_slots_fans);
+	kfree(wf_smu_drive_fans);
+	kfree(wf_smu_cpu_fans);
 
 	return 0;
 }
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 26ac8aa..f084249 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1678,9 +1678,9 @@
 
 	bitmap->mddev = mddev;
 
-	bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
+	bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
 	if (bm) {
-		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
+		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear");
 		sysfs_put(bm);
 	} else
 		bitmap->sysfs_can_clear = NULL;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cefd63d..a9fd491 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1766,7 +1766,7 @@
 		kobject_del(&rdev->kobj);
 		goto fail;
 	}
-	rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");
+	rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, NULL, "state");
 
 	list_add_rcu(&rdev->same_set, &mddev->disks);
 	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
@@ -4189,7 +4189,7 @@
 	mutex_unlock(&disks_mutex);
 	if (!error) {
 		kobject_uevent(&mddev->kobj, KOBJ_ADD);
-		mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
+		mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, NULL, "array_state");
 	}
 	mddev_put(mddev);
 	return error;
@@ -4398,7 +4398,7 @@
 			printk(KERN_WARNING
 			       "md: cannot register extra attributes for %s\n",
 			       mdname(mddev));
-		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
+		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
 	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
 		mddev->ro = 0;
 
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 7b6f7ee..f12dc3e 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -3,7 +3,6 @@
 #
 
 obj-$(CONFIG_IBM_ASM)		+= ibmasm/
-obj-$(CONFIG_HDPU_FEATURES)	+= hdpuftrs/
 obj-$(CONFIG_AD525X_DPOT)	+= ad525x_dpot.o
 obj-$(CONFIG_ATMEL_PWM)		+= atmel_pwm.o
 obj-$(CONFIG_ATMEL_SSC)		+= atmel-ssc.o
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index ed090e7..19fc7c1 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -707,7 +707,7 @@
 	return nread;
 }
 
-static ssize_t c2port_read_flash_data(struct kobject *kobj,
+static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *attr,
 				char *buffer, loff_t offset, size_t count)
 {
@@ -824,7 +824,7 @@
 	return nwrite;
 }
 
-static ssize_t c2port_write_flash_data(struct kobject *kobj,
+static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *attr,
 				char *buffer, loff_t offset, size_t count)
 {
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 9197cfc..a513f0a 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -140,7 +140,8 @@
 /*
  * User data attribute
  */
-static ssize_t ds1682_eeprom_read(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
+				  struct bin_attribute *attr,
 				  char *buf, loff_t off, size_t count)
 {
 	struct i2c_client *client = kobj_to_i2c_client(kobj);
@@ -163,7 +164,8 @@
 	return count;
 }
 
-static ssize_t ds1682_eeprom_write(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj,
+				   struct bin_attribute *attr,
 				   char *buf, loff_t off, size_t count)
 {
 	struct i2c_client *client = kobj_to_i2c_client(kobj);
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index db7d0f2..f7ca3a4 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -54,7 +54,7 @@
 struct at24_data {
 	struct at24_platform_data chip;
 	struct memory_accessor macc;
-	bool use_smbus;
+	int use_smbus;
 
 	/*
 	 * Lock protects against activities from other Linux tasks,
@@ -184,11 +184,19 @@
 	if (count > io_limit)
 		count = io_limit;
 
-	if (at24->use_smbus) {
+	switch (at24->use_smbus) {
+	case I2C_SMBUS_I2C_BLOCK_DATA:
 		/* Smaller eeproms can work given some SMBus extension calls */
 		if (count > I2C_SMBUS_BLOCK_MAX)
 			count = I2C_SMBUS_BLOCK_MAX;
-	} else {
+		break;
+	case I2C_SMBUS_WORD_DATA:
+		count = 2;
+		break;
+	case I2C_SMBUS_BYTE_DATA:
+		count = 1;
+		break;
+	default:
 		/*
 		 * When we have a better choice than SMBus calls, use a
 		 * combined I2C message. Write address; then read up to
@@ -219,10 +227,27 @@
 	timeout = jiffies + msecs_to_jiffies(write_timeout);
 	do {
 		read_time = jiffies;
-		if (at24->use_smbus) {
+		switch (at24->use_smbus) {
+		case I2C_SMBUS_I2C_BLOCK_DATA:
 			status = i2c_smbus_read_i2c_block_data(client, offset,
 					count, buf);
-		} else {
+			break;
+		case I2C_SMBUS_WORD_DATA:
+			status = i2c_smbus_read_word_data(client, offset);
+			if (status >= 0) {
+				buf[0] = status & 0xff;
+				buf[1] = status >> 8;
+				status = count;
+			}
+			break;
+		case I2C_SMBUS_BYTE_DATA:
+			status = i2c_smbus_read_byte_data(client, offset);
+			if (status >= 0) {
+				buf[0] = status;
+				status = count;
+			}
+			break;
+		default:
 			status = i2c_transfer(client->adapter, msg, 2);
 			if (status == 2)
 				status = count;
@@ -274,7 +299,8 @@
 	return retval;
 }
 
-static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t at24_bin_read(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t off, size_t count)
 {
 	struct at24_data *at24;
@@ -395,7 +421,8 @@
 	return retval;
 }
 
-static ssize_t at24_bin_write(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t off, size_t count)
 {
 	struct at24_data *at24;
@@ -434,7 +461,7 @@
 {
 	struct at24_platform_data chip;
 	bool writable;
-	bool use_smbus = false;
+	int use_smbus = 0;
 	struct at24_data *at24;
 	int err;
 	unsigned i, num_addresses;
@@ -475,12 +502,19 @@
 			err = -EPFNOSUPPORT;
 			goto err_out;
 		}
-		if (!i2c_check_functionality(client->adapter,
+		if (i2c_check_functionality(client->adapter,
 				I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+			use_smbus = I2C_SMBUS_I2C_BLOCK_DATA;
+		} else if (i2c_check_functionality(client->adapter,
+				I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+			use_smbus = I2C_SMBUS_WORD_DATA;
+		} else if (i2c_check_functionality(client->adapter,
+				I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
+			use_smbus = I2C_SMBUS_BYTE_DATA;
+		} else {
 			err = -EPFNOSUPPORT;
 			goto err_out;
 		}
-		use_smbus = true;
 	}
 
 	if (chip.flags & AT24_FLAG_TAKE8ADDR)
@@ -566,11 +600,16 @@
 	dev_info(&client->dev, "%zu byte %s EEPROM %s\n",
 		at24->bin.size, client->name,
 		writable ? "(writable)" : "(read-only)");
+	if (use_smbus == I2C_SMBUS_WORD_DATA ||
+	    use_smbus == I2C_SMBUS_BYTE_DATA) {
+		dev_notice(&client->dev, "Falling back to %s reads, "
+			   "performance will suffer\n", use_smbus ==
+			   I2C_SMBUS_WORD_DATA ? "word" : "byte");
+	}
 	dev_dbg(&client->dev,
-		"page_size %d, num_addresses %d, write_max %d%s\n",
+		"page_size %d, num_addresses %d, write_max %d, use_smbus %d\n",
 		chip.page_size, num_addresses,
-		at24->write_max,
-		use_smbus ? ", use_smbus" : "");
+		at24->write_max, use_smbus);
 
 	/* export data to kernel code */
 	if (chip.setup)
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index d194212..c627e41 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -126,7 +126,8 @@
 }
 
 static ssize_t
-at25_bin_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+at25_bin_read(struct file *filp, struct kobject *kobj,
+	      struct bin_attribute *bin_attr,
 	      char *buf, loff_t off, size_t count)
 {
 	struct device		*dev;
@@ -253,7 +254,8 @@
 }
 
 static ssize_t
-at25_bin_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+at25_bin_write(struct file *filp, struct kobject *kobj,
+	       struct bin_attribute *bin_attr,
 	       char *buf, loff_t off, size_t count)
 {
 	struct device		*dev;
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index e306a8c..45060dd 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -81,7 +81,8 @@
 	mutex_unlock(&data->update_lock);
 }
 
-static ssize_t eeprom_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
+			   struct bin_attribute *bin_attr,
 			   char *buf, loff_t off, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj));
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index fe29092..5653a3c 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -107,7 +107,7 @@
 	mutex_unlock(&data->update_lock);
 }
 
-static ssize_t max6875_read(struct kobject *kobj,
+static ssize_t max6875_read(struct file *filp, struct kobject *kobj,
 			    struct bin_attribute *bin_attr,
 			    char *buf, loff_t off, size_t count)
 {
diff --git a/drivers/misc/hdpuftrs/Makefile b/drivers/misc/hdpuftrs/Makefile
deleted file mode 100644
index ac74ae6..0000000
--- a/drivers/misc/hdpuftrs/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_HDPU_FEATURES) := hdpu_cpustate.o hdpu_nexus.o
diff --git a/drivers/misc/hdpuftrs/hdpu_cpustate.c b/drivers/misc/hdpuftrs/hdpu_cpustate.c
deleted file mode 100644
index 176fe4e..0000000
--- a/drivers/misc/hdpuftrs/hdpu_cpustate.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- *	Sky CPU State Driver
- *
- *	Copyright (C) 2002 Brian Waite
- *
- *	This driver allows use of the CPU state bits
- *	It exports the /dev/sky_cpustate and also
- *	/proc/sky_cpustate pseudo-file for status information.
- *
- *	This program is free software; you can redistribute it and/or
- *	modify it under the terms of the GNU General Public License
- *	as published by the Free Software Foundation; either version
- *	2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/smp_lock.h>
-#include <linux/miscdevice.h>
-#include <linux/proc_fs.h>
-#include <linux/hdpu_features.h>
-#include <linux/platform_device.h>
-#include <asm/uaccess.h>
-#include <linux/seq_file.h>
-#include <asm/io.h>
-
-#define SKY_CPUSTATE_VERSION		"1.1"
-
-static int hdpu_cpustate_probe(struct platform_device *pdev);
-static int hdpu_cpustate_remove(struct platform_device *pdev);
-
-static unsigned char cpustate_get_state(void);
-static int cpustate_proc_open(struct inode *inode, struct file *file);
-static int cpustate_proc_read(struct seq_file *seq, void *offset);
-
-static struct cpustate_t cpustate;
-
-static const struct file_operations proc_cpustate = {
-	.open = cpustate_proc_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.owner = THIS_MODULE,
-};
-
-static int cpustate_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, cpustate_proc_read, NULL);
-}
-
-static int cpustate_proc_read(struct seq_file *seq, void *offset)
-{
-	seq_printf(seq, "CPU State: %04x\n", cpustate_get_state());
-	return 0;
-}
-
-static int cpustate_get_ref(int excl)
-{
-
-	int retval = -EBUSY;
-
-	spin_lock(&cpustate.lock);
-
-	if (cpustate.excl)
-		goto out_busy;
-
-	if (excl) {
-		if (cpustate.open_count)
-			goto out_busy;
-		cpustate.excl = 1;
-	}
-
-	cpustate.open_count++;
-	retval = 0;
-
-      out_busy:
-	spin_unlock(&cpustate.lock);
-	return retval;
-}
-
-static int cpustate_free_ref(void)
-{
-
-	spin_lock(&cpustate.lock);
-
-	cpustate.excl = 0;
-	cpustate.open_count--;
-
-	spin_unlock(&cpustate.lock);
-	return 0;
-}
-
-static unsigned char cpustate_get_state(void)
-{
-
-	return cpustate.cached_val;
-}
-
-static void cpustate_set_state(unsigned char new_state)
-{
-	unsigned int state = (new_state << 21);
-
-#ifdef DEBUG_CPUSTATE
-	printk("CPUSTATE -> 0x%x\n", new_state);
-#endif
-	spin_lock(&cpustate.lock);
-	cpustate.cached_val = new_state;
-	writel((0xff << 21), cpustate.clr_addr);
-	writel(state, cpustate.set_addr);
-	spin_unlock(&cpustate.lock);
-}
-
-/*
- *	Now all the various file operations that we export.
- */
-
-static ssize_t cpustate_read(struct file *file, char *buf,
-			     size_t count, loff_t * ppos)
-{
-	unsigned char data;
-
-	if (count < 0)
-		return -EFAULT;
-	if (count == 0)
-		return 0;
-
-	data = cpustate_get_state();
-	if (copy_to_user(buf, &data, sizeof(unsigned char)))
-		return -EFAULT;
-	return sizeof(unsigned char);
-}
-
-static ssize_t cpustate_write(struct file *file, const char *buf,
-			      size_t count, loff_t * ppos)
-{
-	unsigned char data;
-
-	if (count < 0)
-		return -EFAULT;
-
-	if (count == 0)
-		return 0;
-
-	if (copy_from_user((unsigned char *)&data, buf, sizeof(unsigned char)))
-		return -EFAULT;
-
-	cpustate_set_state(data);
-	return sizeof(unsigned char);
-}
-
-static int cpustate_open(struct inode *inode, struct file *file)
-{
-	int ret;
-
-	lock_kernel();
-	ret = cpustate_get_ref((file->f_flags & O_EXCL));
-	unlock_kernel();
-
-	return ret;
-}
-
-static int cpustate_release(struct inode *inode, struct file *file)
-{
-	return cpustate_free_ref();
-}
-
-static struct platform_driver hdpu_cpustate_driver = {
-	.probe = hdpu_cpustate_probe,
-	.remove = hdpu_cpustate_remove,
-	.driver = {
-		.name = HDPU_CPUSTATE_NAME,
-		.owner = THIS_MODULE,
-	},
-};
-
-/*
- *	The various file operations we support.
- */
-static const struct file_operations cpustate_fops = {
-      .owner	= THIS_MODULE,
-      .open	= cpustate_open,
-      .release	= cpustate_release,
-      .read	= cpustate_read,
-      .write	= cpustate_write,
-      .llseek	= no_llseek,
-};
-
-static struct miscdevice cpustate_dev = {
-	.minor	= MISC_DYNAMIC_MINOR,
-	.name	= "sky_cpustate",
-	.fops	= &cpustate_fops,
-};
-
-static int hdpu_cpustate_probe(struct platform_device *pdev)
-{
-	struct resource *res;
-	struct proc_dir_entry *proc_de;
-	int ret;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		printk(KERN_ERR "sky_cpustate: "
-		       "Invalid memory resource.\n");
-		return -EINVAL;
-	}
-	cpustate.set_addr = (unsigned long *)res->start;
-	cpustate.clr_addr = (unsigned long *)res->end - 1;
-
-	ret = misc_register(&cpustate_dev);
-	if (ret) {
-		printk(KERN_WARNING "sky_cpustate: "
-		       "Unable to register misc device.\n");
-		cpustate.set_addr = NULL;
-		cpustate.clr_addr = NULL;
-		return ret;
-	}
-
-	proc_de = proc_create("sky_cpustate", 0666, NULL, &proc_cpustate);
-	if (!proc_de) {
-		printk(KERN_WARNING "sky_cpustate: "
-		       "Unable to create proc entry\n");
-	}
-
-	printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n");
-	return 0;
-}
-
-static int hdpu_cpustate_remove(struct platform_device *pdev)
-{
-	cpustate.set_addr = NULL;
-	cpustate.clr_addr = NULL;
-
-	remove_proc_entry("sky_cpustate", NULL);
-	misc_deregister(&cpustate_dev);
-
-	return 0;
-}
-
-static int __init cpustate_init(void)
-{
-	return platform_driver_register(&hdpu_cpustate_driver);
-}
-
-static void __exit cpustate_exit(void)
-{
-	platform_driver_unregister(&hdpu_cpustate_driver);
-}
-
-module_init(cpustate_init);
-module_exit(cpustate_exit);
-
-MODULE_AUTHOR("Brian Waite");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" HDPU_CPUSTATE_NAME);
diff --git a/drivers/misc/hdpuftrs/hdpu_nexus.c b/drivers/misc/hdpuftrs/hdpu_nexus.c
deleted file mode 100644
index ce39fa5..0000000
--- a/drivers/misc/hdpuftrs/hdpu_nexus.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- *	Sky Nexus Register Driver
- *
- *	Copyright (C) 2002 Brian Waite
- *
- *	This driver allows reading the Nexus register
- *	It exports the /proc/sky_chassis_id and also
- *	/proc/sky_slot_id pseudo-file for status information.
- *
- *	This program is free software; you can redistribute it and/or
- *	modify it under the terms of the GNU General Public License
- *	as published by the Free Software Foundation; either version
- *	2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/hdpu_features.h>
-#include <linux/platform_device.h>
-#include <linux/seq_file.h>
-#include <asm/io.h>
-
-static int hdpu_nexus_probe(struct platform_device *pdev);
-static int hdpu_nexus_remove(struct platform_device *pdev);
-static int hdpu_slot_id_open(struct inode *inode, struct file *file);
-static int hdpu_slot_id_read(struct seq_file *seq, void *offset);
-static int hdpu_chassis_id_open(struct inode *inode, struct file *file);
-static int hdpu_chassis_id_read(struct seq_file *seq, void *offset);
-
-static struct proc_dir_entry *hdpu_slot_id;
-static struct proc_dir_entry *hdpu_chassis_id;
-static int slot_id = -1;
-static int chassis_id = -1;
-
-static const struct file_operations proc_slot_id = {
-	.open = hdpu_slot_id_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.owner = THIS_MODULE,
-};
-
-static const struct file_operations proc_chassis_id = {
-	.open = hdpu_chassis_id_open,
-	.read = seq_read,
-	.llseek	= seq_lseek,
-	.release = single_release,
-	.owner = THIS_MODULE,
-};
-
-static struct platform_driver hdpu_nexus_driver = {
-	.probe = hdpu_nexus_probe,
-	.remove = hdpu_nexus_remove,
-	.driver = {
-		.name = HDPU_NEXUS_NAME,
-		.owner = THIS_MODULE,
-	},
-};
-
-static int hdpu_slot_id_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, hdpu_slot_id_read, NULL);
-}
-
-static int hdpu_slot_id_read(struct seq_file *seq, void *offset)
-{
-	seq_printf(seq, "%d\n", slot_id);
-	return 0;
-}
-
-static int hdpu_chassis_id_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, hdpu_chassis_id_read, NULL);
-}
-
-static int hdpu_chassis_id_read(struct seq_file *seq, void *offset)
-{
-	seq_printf(seq, "%d\n", chassis_id);
-	return 0;
-}
-
-static int hdpu_nexus_probe(struct platform_device *pdev)
-{
-	struct resource *res;
-	int *nexus_id_addr;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		printk(KERN_ERR "sky_nexus: "
-		       "Invalid memory resource.\n");
-		return -EINVAL;
-	}
-	nexus_id_addr = ioremap(res->start,
-				(unsigned long)(res->end - res->start));
-	if (nexus_id_addr) {
-		slot_id = (*nexus_id_addr >> 8) & 0x1f;
-		chassis_id = *nexus_id_addr & 0xff;
-		iounmap(nexus_id_addr);
-	} else {
-		printk(KERN_ERR "sky_nexus: Could not map slot id\n");
-	}
-
-	hdpu_slot_id = proc_create("sky_slot_id", 0666, NULL, &proc_slot_id);
-	if (!hdpu_slot_id) {
-		printk(KERN_WARNING "sky_nexus: "
-		       "Unable to create proc dir entry: sky_slot_id\n");
-	}
-
-	hdpu_chassis_id = proc_create("sky_chassis_id", 0666, NULL,
-				      &proc_chassis_id);
-	if (!hdpu_chassis_id)
-		printk(KERN_WARNING "sky_nexus: "
-		       "Unable to create proc dir entry: sky_chassis_id\n");
-
-	return 0;
-}
-
-static int hdpu_nexus_remove(struct platform_device *pdev)
-{
-	slot_id = -1;
-	chassis_id = -1;
-
-	remove_proc_entry("sky_slot_id", NULL);
-	remove_proc_entry("sky_chassis_id", NULL);
-
-	hdpu_slot_id = 0;
-	hdpu_chassis_id = 0;
-
-	return 0;
-}
-
-static int __init nexus_init(void)
-{
-	return platform_driver_register(&hdpu_nexus_driver);
-}
-
-static void __exit nexus_exit(void)
-{
-	platform_driver_unregister(&hdpu_nexus_driver);
-}
-
-module_init(nexus_init);
-module_exit(nexus_exit);
-
-MODULE_AUTHOR("Brian Waite");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" HDPU_NEXUS_NAME);
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 04ae884..61f1d27 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -3,6 +3,7 @@
  *
  *  Copyright (C) 2007 Google Inc,
  *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ *  Copyright (C) 2009, Code Aurora Forum. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -26,6 +27,7 @@
 #include <linux/log2.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
 #include <linux/clk.h>
 #include <linux/scatterlist.h>
 #include <linux/platform_device.h>
@@ -47,6 +49,8 @@
 
 #define DRIVER_NAME "msm-sdcc"
 
+#define BUSCLK_PWRSAVE 1
+#define BUSCLK_TIMEOUT (HZ)
 static unsigned int msmsdcc_fmin = 144000;
 static unsigned int msmsdcc_fmax = 50000000;
 static unsigned int msmsdcc_4bit = 1;
@@ -57,6 +61,67 @@
 #define PIO_SPINMAX 30
 #define CMD_SPINMAX 20
 
+
+static inline void
+msmsdcc_disable_clocks(struct msmsdcc_host *host, int deferr)
+{
+	WARN_ON(!host->clks_on);
+
+	BUG_ON(host->curr.mrq);
+
+	if (deferr) {
+		mod_timer(&host->busclk_timer, jiffies + BUSCLK_TIMEOUT);
+	} else {
+		del_timer_sync(&host->busclk_timer);
+		/* Need to check clks_on again in case the busclk
+		 * timer fired
+		 */
+		if (host->clks_on) {
+			clk_disable(host->clk);
+			clk_disable(host->pclk);
+			host->clks_on = 0;
+		}
+	}
+}
+
+static inline int
+msmsdcc_enable_clocks(struct msmsdcc_host *host)
+{
+	int rc;
+
+	del_timer_sync(&host->busclk_timer);
+
+	if (!host->clks_on) {
+		rc = clk_enable(host->pclk);
+		if (rc)
+			return rc;
+		rc = clk_enable(host->clk);
+		if (rc) {
+			clk_disable(host->pclk);
+			return rc;
+		}
+		udelay(1 + ((3 * USEC_PER_SEC) /
+		       (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
+		host->clks_on = 1;
+	}
+	return 0;
+}
+
+static inline unsigned int
+msmsdcc_readl(struct msmsdcc_host *host, unsigned int reg)
+{
+	return readl(host->base + reg);
+}
+
+static inline void
+msmsdcc_writel(struct msmsdcc_host *host, u32 data, unsigned int reg)
+{
+	writel(data, host->base + reg);
+	/* 3 clk delay required! */
+	udelay(1 + ((3 * USEC_PER_SEC) /
+	       (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
+}
+
 static void
 msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
 		      u32 c);
@@ -64,8 +129,6 @@
 static void
 msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
 {
-	writel(0, host->base + MMCICOMMAND);
-
 	BUG_ON(host->curr.data);
 
 	host->curr.mrq = NULL;
@@ -76,6 +139,9 @@
 	if (mrq->cmd->error == -ETIMEDOUT)
 		mdelay(5);
 
+#if BUSCLK_PWRSAVE
+	msmsdcc_disable_clocks(host, 1);
+#endif
 	/*
 	 * Need to drop the host lock here; mmc_request_done may call
 	 * back into the driver...
@@ -88,7 +154,6 @@
 static void
 msmsdcc_stop_data(struct msmsdcc_host *host)
 {
-	writel(0, host->base + MMCIDATACTRL);
 	host->curr.data = NULL;
 	host->curr.got_dataend = host->curr.got_datablkend = 0;
 }
@@ -109,6 +174,31 @@
 	return 0;
 }
 
+static inline void
+msmsdcc_start_command_exec(struct msmsdcc_host *host, u32 arg, u32 c) {
+       msmsdcc_writel(host, arg, MMCIARGUMENT);
+       msmsdcc_writel(host, c, MMCICOMMAND);
+}
+
+static void
+msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)
+{
+	struct msmsdcc_host *host = (struct msmsdcc_host *)cmd->data;
+
+	msmsdcc_writel(host, host->cmd_timeout, MMCIDATATIMER);
+	msmsdcc_writel(host, (unsigned int)host->curr.xfer_size,
+		       MMCIDATALENGTH);
+	msmsdcc_writel(host, host->cmd_pio_irqmask, MMCIMASK1);
+	msmsdcc_writel(host, host->cmd_datactrl, MMCIDATACTRL);
+
+	if (host->cmd_cmd) {
+		msmsdcc_start_command_exec(host,
+					   (u32) host->cmd_cmd->arg,
+					   (u32) host->cmd_c);
+	}
+	host->dma.active = 1;
+}
+
 static void
 msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
 			  unsigned int result,
@@ -121,8 +211,11 @@
 	struct mmc_request	*mrq;
 
 	spin_lock_irqsave(&host->lock, flags);
+	host->dma.active = 0;
+
 	mrq = host->curr.mrq;
 	BUG_ON(!mrq);
+	WARN_ON(!mrq->data);
 
 	if (!(result & DMOV_RSLT_VALID)) {
 		pr_err("msmsdcc: Invalid DataMover result\n");
@@ -146,7 +239,6 @@
 		if (!mrq->data->error)
 			mrq->data->error = -EIO;
 	}
-	host->dma.busy = 0;
 	dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
 		     host->dma.dir);
 
@@ -159,6 +251,7 @@
 	}
 
 	host->dma.sg = NULL;
+	host->dma.busy = 0;
 
 	if ((host->curr.got_dataend && host->curr.got_datablkend)
 	     || mrq->data->error) {
@@ -172,12 +265,14 @@
 		if (!mrq->data->error)
 			host->curr.data_xfered = host->curr.xfer_size;
 		if (!mrq->data->stop || mrq->cmd->error) {
-			writel(0, host->base + MMCICOMMAND);
 			host->curr.mrq = NULL;
 			host->curr.cmd = NULL;
 			mrq->data->bytes_xfered = host->curr.data_xfered;
 
 			spin_unlock_irqrestore(&host->lock, flags);
+#if BUSCLK_PWRSAVE
+			msmsdcc_disable_clocks(host, 1);
+#endif
 			mmc_request_done(host->mmc, mrq);
 			return;
 		} else
@@ -218,6 +313,8 @@
 	host->dma.sg = data->sg;
 	host->dma.num_ents = data->sg_len;
 
+       BUG_ON(host->dma.num_ents > NR_SG); /* Prevent memory corruption */
+
 	nc = host->dma.nc;
 
 	switch (host->pdev_id) {
@@ -246,22 +343,15 @@
 
 	host->curr.user_pages = 0;
 
-	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
-		       host->dma.num_ents, host->dma.dir);
-
-	if (n != host->dma.num_ents) {
-		pr_err("%s: Unable to map in all sg elements\n",
-		       mmc_hostname(host->mmc));
-		host->dma.sg = NULL;
-		host->dma.num_ents = 0;
-		return -ENOMEM;
-	}
-
 	box = &nc->cmd[0];
 	for (i = 0; i < host->dma.num_ents; i++) {
 		box->cmd = CMD_MODE_BOX;
 
-		if (i == (host->dma.num_ents - 1))
+	/* Initialize sg dma address */
+	sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg))
+				+ sg->offset;
+
+	if (i == (host->dma.num_ents - 1))
 			box->cmd |= CMD_LC;
 		rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
 			(sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
@@ -300,15 +390,70 @@
 			       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
 	host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
 
+	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
+			host->dma.num_ents, host->dma.dir);
+/* dsb inside dma_map_sg will write nc out to mem as well */
+
+	if (n != host->dma.num_ents) {
+		printk(KERN_ERR "%s: Unable to map in all sg elements\n",
+			mmc_hostname(host->mmc));
+		host->dma.sg = NULL;
+		host->dma.num_ents = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int
+snoop_cccr_abort(struct mmc_command *cmd)
+{
+	if ((cmd->opcode == 52) &&
+	    (cmd->arg & 0x80000000) &&
+	    (((cmd->arg >> 9) & 0x1ffff) == SDIO_CCCR_ABORT))
+		return 1;
 	return 0;
 }
 
 static void
-msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
+msmsdcc_start_command_deferred(struct msmsdcc_host *host,
+				struct mmc_command *cmd, u32 *c)
+{
+	*c |= (cmd->opcode | MCI_CPSM_ENABLE);
+
+	if (cmd->flags & MMC_RSP_PRESENT) {
+		if (cmd->flags & MMC_RSP_136)
+			*c |= MCI_CPSM_LONGRSP;
+		*c |= MCI_CPSM_RESPONSE;
+	}
+
+	if (/*interrupt*/0)
+		*c |= MCI_CPSM_INTERRUPT;
+
+	if ((((cmd->opcode == 17) || (cmd->opcode == 18))  ||
+	     ((cmd->opcode == 24) || (cmd->opcode == 25))) ||
+	      (cmd->opcode == 53))
+		*c |= MCI_CSPM_DATCMD;
+
+	if (cmd == cmd->mrq->stop)
+		*c |= MCI_CSPM_MCIABORT;
+
+	if (snoop_cccr_abort(cmd))
+		*c |= MCI_CSPM_MCIABORT;
+
+	if (host->curr.cmd != NULL) {
+		printk(KERN_ERR "%s: Overlapping command requests\n",
+			mmc_hostname(host->mmc));
+	}
+	host->curr.cmd = cmd;
+}
+
+static void
+msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,
+			struct mmc_command *cmd, u32 c)
 {
 	unsigned int datactrl, timeout;
 	unsigned long long clks;
-	void __iomem *base = host->base;
 	unsigned int pio_irqmask = 0;
 
 	host->curr.data = data;
@@ -320,13 +465,6 @@
 
 	memset(&host->pio, 0, sizeof(host->pio));
 
-	clks = (unsigned long long)data->timeout_ns * host->clk_rate;
-	do_div(clks, NSEC_PER_SEC);
-	timeout = data->timeout_clks + (unsigned int)clks;
-	writel(timeout, base + MMCIDATATIMER);
-
-	writel(host->curr.xfer_size, base + MMCIDATALENGTH);
-
 	datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
 
 	if (!msmsdcc_config_dma(host, data))
@@ -347,47 +485,51 @@
 	if (data->flags & MMC_DATA_READ)
 		datactrl |= MCI_DPSM_DIRECTION;
 
-	writel(pio_irqmask, base + MMCIMASK1);
-	writel(datactrl, base + MMCIDATACTRL);
+	clks = (unsigned long long)data->timeout_ns * host->clk_rate;
+	do_div(clks, NSEC_PER_SEC);
+	timeout = data->timeout_clks + (unsigned int)clks * 2;
 
 	if (datactrl & MCI_DPSM_DMAENABLE) {
+		/* Save parameters for the exec function */
+		host->cmd_timeout = timeout;
+		host->cmd_pio_irqmask = pio_irqmask;
+		host->cmd_datactrl = datactrl;
+		host->cmd_cmd = cmd;
+
+		host->dma.hdr.execute_func = msmsdcc_dma_exec_func;
+		host->dma.hdr.data = (void *)host;
 		host->dma.busy = 1;
+
+		if (cmd) {
+			msmsdcc_start_command_deferred(host, cmd, &c);
+			host->cmd_c = c;
+		}
 		msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
+	} else {
+		msmsdcc_writel(host, timeout, MMCIDATATIMER);
+
+		msmsdcc_writel(host, host->curr.xfer_size, MMCIDATALENGTH);
+
+		msmsdcc_writel(host, pio_irqmask, MMCIMASK1);
+		msmsdcc_writel(host, datactrl, MMCIDATACTRL);
+
+		if (cmd) {
+			/* Daisy-chain the command if requested */
+			msmsdcc_start_command(host, cmd, c);
+		}
 	}
 }
 
 static void
 msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
 {
-	void __iomem *base = host->base;
-
-	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
-		writel(0, base + MMCICOMMAND);
-		udelay(2 + ((5 * 1000000) / host->clk_rate));
-	}
-
-	c |= cmd->opcode | MCI_CPSM_ENABLE;
-
-	if (cmd->flags & MMC_RSP_PRESENT) {
-		if (cmd->flags & MMC_RSP_136)
-			c |= MCI_CPSM_LONGRSP;
-		c |= MCI_CPSM_RESPONSE;
-	}
-
-	if (cmd->opcode == 17 || cmd->opcode == 18 ||
-	    cmd->opcode == 24 || cmd->opcode == 25 ||
-	    cmd->opcode == 53)
-		c |= MCI_CSPM_DATCMD;
-
 	if (cmd == cmd->mrq->stop)
 		c |= MCI_CSPM_MCIABORT;
 
-	host->curr.cmd = cmd;
-
 	host->stats.cmds++;
 
-	writel(cmd->arg, base + MMCIARGUMENT);
-	writel(c, base + MMCICOMMAND);
+	msmsdcc_start_command_deferred(host, cmd, &c);
+	msmsdcc_start_command_exec(host, cmd->arg, c);
 }
 
 static void
@@ -421,13 +563,11 @@
 static int
 msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
 {
-	void __iomem	*base = host->base;
 	uint32_t	*ptr = (uint32_t *) buffer;
 	int		count = 0;
 
-	while (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) {
-
-		*ptr = readl(base + MMCIFIFO + (count % MCI_FIFOSIZE));
+	while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) {
+		*ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE));
 		ptr++;
 		count += sizeof(uint32_t);
 
@@ -459,7 +599,7 @@
 		if (remain == 0)
 			break;
 
-		status = readl(base + MMCISTATUS);
+		status = msmsdcc_readl(host, MMCISTATUS);
 	} while (status & MCI_TXFIFOHALFEMPTY);
 
 	return ptr - buffer;
@@ -469,7 +609,7 @@
 msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
 {
 	while (maxspin) {
-		if ((readl(host->base + MMCISTATUS) & mask))
+		if ((msmsdcc_readl(host, MMCISTATUS) & mask))
 			return 0;
 		udelay(1);
 		--maxspin;
@@ -477,14 +617,13 @@
 	return -ETIMEDOUT;
 }
 
-static int
+static irqreturn_t
 msmsdcc_pio_irq(int irq, void *dev_id)
 {
 	struct msmsdcc_host	*host = dev_id;
-	void __iomem		*base = host->base;
 	uint32_t		status;
 
-	status = readl(base + MMCISTATUS);
+	status = msmsdcc_readl(host, MMCISTATUS);
 
 	do {
 		unsigned long flags;
@@ -539,14 +678,14 @@
 			host->pio.sg_off = 0;
 		}
 
-		status = readl(base + MMCISTATUS);
+		status = msmsdcc_readl(host, MMCISTATUS);
 	} while (1);
 
 	if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
-		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
+		msmsdcc_writel(host, MCI_RXDATAAVLBLMASK, MMCIMASK1);
 
 	if (!host->curr.xfer_remain)
-		writel(0, base + MMCIMASK1);
+		msmsdcc_writel(host, 0, MMCIMASK1);
 
 	return IRQ_HANDLED;
 }
@@ -554,15 +693,13 @@
 static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
 {
 	struct mmc_command *cmd = host->curr.cmd;
-	void __iomem	   *base = host->base;
 
 	host->curr.cmd = NULL;
-	cmd->resp[0] = readl(base + MMCIRESPONSE0);
-	cmd->resp[1] = readl(base + MMCIRESPONSE1);
-	cmd->resp[2] = readl(base + MMCIRESPONSE2);
-	cmd->resp[3] = readl(base + MMCIRESPONSE3);
+	cmd->resp[0] = msmsdcc_readl(host, MMCIRESPONSE0);
+	cmd->resp[1] = msmsdcc_readl(host, MMCIRESPONSE1);
+	cmd->resp[2] = msmsdcc_readl(host, MMCIRESPONSE2);
+	cmd->resp[3] = msmsdcc_readl(host, MMCIRESPONSE3);
 
-	del_timer(&host->command_timer);
 	if (status & MCI_CMDTIMEOUT) {
 		cmd->error = -ETIMEDOUT;
 	} else if (status & MCI_CMDCRCFAIL &&
@@ -580,8 +717,10 @@
 			msmsdcc_request_end(host, cmd->mrq);
 		} else /* host->data == NULL */
 			msmsdcc_request_end(host, cmd->mrq);
-	} else if (!(cmd->data->flags & MMC_DATA_READ))
-		msmsdcc_start_data(host, cmd->data);
+	} else if (cmd->data)
+		if (!(cmd->data->flags & MMC_DATA_READ))
+			msmsdcc_start_data(host, cmd->data,
+						NULL, 0);
 }
 
 static void
@@ -590,6 +729,11 @@
 {
 	struct mmc_data *data = host->curr.data;
 
+	if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
+	              MCI_CMDTIMEOUT) && host->curr.cmd) {
+		msmsdcc_do_cmdirq(host, status);
+	}
+
 	if (!data)
 		return;
 
@@ -602,7 +746,8 @@
 			msm_dmov_stop_cmd(host->dma.channel,
 					  &host->dma.hdr, 0);
 		else {
-			msmsdcc_stop_data(host);
+			if (host->curr.data)
+				msmsdcc_stop_data(host);
 			if (!data->stop)
 				msmsdcc_request_end(host, data->mrq);
 			else
@@ -657,18 +802,19 @@
 	spin_lock(&host->lock);
 
 	do {
-		status = readl(base + MMCISTATUS);
+		status = msmsdcc_readl(host, MMCISTATUS);
+		status &= (msmsdcc_readl(host, MMCIMASK0) |
+					      MCI_DATABLOCKENDMASK);
+		msmsdcc_writel(host, status, MMCICLEAR);
 
-		status &= (readl(base + MMCIMASK0) | MCI_DATABLOCKENDMASK);
-		writel(status, base + MMCICLEAR);
+		if (status & MCI_SDIOINTR)
+			status &= ~MCI_SDIOINTR;
+
+		if (!status)
+			break;
 
 		msmsdcc_handle_irq_data(host, status, base);
 
-		if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
-			      MCI_CMDTIMEOUT) && host->curr.cmd) {
-			msmsdcc_do_cmdirq(host, status);
-		}
-
 		if (status & MCI_SDIOINTOPER) {
 			cardint = 1;
 			status &= ~MCI_SDIOINTOPER;
@@ -714,24 +860,27 @@
 		return;
 	}
 
+	msmsdcc_enable_clocks(host);
+
 	host->curr.mrq = mrq;
 
 	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
-		msmsdcc_start_data(host, mrq->data);
-
-	msmsdcc_start_command(host, mrq->cmd, 0);
+		/* Queue/read data, daisy-chain command when data starts */
+		msmsdcc_start_data(host, mrq->data, mrq->cmd, 0);
+	else
+		msmsdcc_start_command(host, mrq->cmd, 0);
 
 	if (host->cmdpoll && !msmsdcc_spin_on_status(host,
 				MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
 				CMD_SPINMAX)) {
-		uint32_t status = readl(host->base + MMCISTATUS);
+		uint32_t status = msmsdcc_readl(host, MMCISTATUS);
 		msmsdcc_do_cmdirq(host, status);
-		writel(MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
-		       host->base + MMCICLEAR);
+		msmsdcc_writel(host,
+			       MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
+			       MMCICLEAR);
 		host->stats.cmdpoll_hits++;
 	} else {
 		host->stats.cmdpoll_misses++;
-		mod_timer(&host->command_timer, jiffies + HZ);
 	}
 	spin_unlock_irqrestore(&host->lock, flags);
 }
@@ -742,14 +891,13 @@
 	struct msmsdcc_host *host = mmc_priv(mmc);
 	u32 clk = 0, pwr = 0;
 	int rc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	msmsdcc_enable_clocks(host);
 
 	if (ios->clock) {
-
-		if (!host->clks_on) {
-			clk_enable(host->pclk);
-			clk_enable(host->clk);
-			host->clks_on = 1;
-		}
 		if (ios->clock != host->clk_rate) {
 			rc = clk_set_rate(host->clk, ios->clock);
 			if (rc < 0)
@@ -787,18 +935,16 @@
 	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
 		pwr |= MCI_OD;
 
-	writel(clk, host->base + MMCICLOCK);
+	msmsdcc_writel(host, clk, MMCICLOCK);
 
 	if (host->pwr != pwr) {
 		host->pwr = pwr;
-		writel(pwr, host->base + MMCIPOWER);
+		msmsdcc_writel(host, pwr, MMCIPOWER);
 	}
-
-	if (!(clk & MCI_CLK_ENABLE) && host->clks_on) {
-		clk_disable(host->clk);
-		clk_disable(host->pclk);
-		host->clks_on = 0;
-	}
+#if BUSCLK_PWRSAVE
+	msmsdcc_disable_clocks(host, 1);
+#endif
+	spin_unlock_irqrestore(&host->lock, flags);
 }
 
 static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -809,13 +955,13 @@
 
 	spin_lock_irqsave(&host->lock, flags);
 	if (msmsdcc_sdioirq == 1) {
-		status = readl(host->base + MMCIMASK0);
+		status = msmsdcc_readl(host, MMCIMASK0);
 		if (enable)
 			status |= MCI_SDIOINTOPERMASK;
 		else
 			status &= ~MCI_SDIOINTOPERMASK;
 		host->saved_irq0mask = status;
-		writel(status, host->base + MMCIMASK0);
+		msmsdcc_writel(host, status, MMCIMASK0);
 	}
 	spin_unlock_irqrestore(&host->lock, flags);
 }
@@ -875,42 +1021,13 @@
 	msmsdcc_check_status((unsigned long) host);
 }
 
-/*
- * called when a command expires.
- * Dump some debugging, and then error
- * out the transaction.
- */
 static void
-msmsdcc_command_expired(unsigned long _data)
+msmsdcc_busclk_expired(unsigned long _data)
 {
 	struct msmsdcc_host	*host = (struct msmsdcc_host *) _data;
-	struct mmc_request	*mrq;
-	unsigned long		flags;
 
-	spin_lock_irqsave(&host->lock, flags);
-	mrq = host->curr.mrq;
-
-	if (!mrq) {
-		pr_info("%s: Command expiry misfire\n",
-			mmc_hostname(host->mmc));
-		spin_unlock_irqrestore(&host->lock, flags);
-		return;
-	}
-
-	pr_err("%s: Command timeout (%p %p %p %p)\n",
-	       mmc_hostname(host->mmc), mrq, mrq->cmd,
-	       mrq->data, host->dma.sg);
-
-	mrq->cmd->error = -ETIMEDOUT;
-	msmsdcc_stop_data(host);
-
-	writel(0, host->base + MMCICOMMAND);
-
-	host->curr.mrq = NULL;
-	host->curr.cmd = NULL;
-
-	spin_unlock_irqrestore(&host->lock, flags);
-	mmc_request_done(host->mmc, mrq);
+	if (host->clks_on)
+		msmsdcc_disable_clocks(host, 0);
 }
 
 static int
@@ -1012,6 +1129,7 @@
 	host->pdev_id = pdev->id;
 	host->plat = plat;
 	host->mmc = mmc;
+	host->curr.cmd = NULL;
 
 	host->cmdpoll = 1;
 
@@ -1027,36 +1145,35 @@
 	host->dmares = dmares;
 	spin_lock_init(&host->lock);
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	if (plat->embedded_sdio)
+		mmc_set_embedded_sdio_data(mmc,
+					   &plat->embedded_sdio->cis,
+					   &plat->embedded_sdio->cccr,
+					   plat->embedded_sdio->funcs,
+					   plat->embedded_sdio->num_funcs);
+#endif
+
 	/*
 	 * Setup DMA
 	 */
 	msmsdcc_init_dma(host);
 
-	/*
-	 * Setup main peripheral bus clock
-	 */
+	/* Get our clocks */
 	host->pclk = clk_get(&pdev->dev, "sdc_pclk");
 	if (IS_ERR(host->pclk)) {
 		ret = PTR_ERR(host->pclk);
 		goto host_free;
 	}
 
-	ret = clk_enable(host->pclk);
-	if (ret)
-		goto pclk_put;
-
-	host->pclk_rate = clk_get_rate(host->pclk);
-
-	/*
-	 * Setup SDC MMC clock
-	 */
 	host->clk = clk_get(&pdev->dev, "sdc_clk");
 	if (IS_ERR(host->clk)) {
 		ret = PTR_ERR(host->clk);
-		goto pclk_disable;
+		goto pclk_put;
 	}
 
-	ret = clk_enable(host->clk);
+	/* Enable clocks */
+	ret = msmsdcc_enable_clocks(host);
 	if (ret)
 		goto clk_put;
 
@@ -1066,10 +1183,9 @@
 		goto clk_disable;
 	}
 
+	host->pclk_rate = clk_get_rate(host->pclk);
 	host->clk_rate = clk_get_rate(host->clk);
 
-	host->clks_on = 1;
-
 	/*
 	 * Setup MMC host structure
 	 */
@@ -1092,10 +1208,10 @@
 	mmc->max_req_size = 33554432;	/* MCI_DATA_LENGTH is 25 bits */
 	mmc->max_seg_size = mmc->max_req_size;
 
-	writel(0, host->base + MMCIMASK0);
-	writel(0x5e007ff, host->base + MMCICLEAR); /* Add: 1 << 25 */
+	msmsdcc_writel(host, 0, MMCIMASK0);
+	msmsdcc_writel(host, 0x5e007ff, MMCICLEAR);
 
-	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+	msmsdcc_writel(host, MCI_IRQENABLE, MMCIMASK0);
 	host->saved_irq0mask = MCI_IRQENABLE;
 
 	/*
@@ -1137,13 +1253,9 @@
 		host->eject = !host->oldstat;
 	}
 
-	/*
-	 * Setup a command timer. We currently need this due to
-	 * some 'strange' timeout / error handling situations.
-	 */
-	init_timer(&host->command_timer);
-	host->command_timer.data = (unsigned long) host;
-	host->command_timer.function = msmsdcc_command_expired;
+	init_timer(&host->busclk_timer);
+	host->busclk_timer.data = (unsigned long) host;
+	host->busclk_timer.function = msmsdcc_busclk_expired;
 
 	ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
 			  DRIVER_NAME " (cmd)", host);
@@ -1181,6 +1293,9 @@
 	if (host->timer.function)
 		pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
 
+#if BUSCLK_PWRSAVE
+	msmsdcc_disable_clocks(host, 1);
+#endif
 	return 0;
  cmd_irq_free:
 	free_irq(cmd_irqres->start, host);
@@ -1188,11 +1303,9 @@
 	if (host->stat_irq)
 		free_irq(host->stat_irq, host);
  clk_disable:
-	clk_disable(host->clk);
+	msmsdcc_disable_clocks(host, 0);
  clk_put:
 	clk_put(host->clk);
- pclk_disable:
-	clk_disable(host->pclk);
  pclk_put:
 	clk_put(host->pclk);
  host_free:
@@ -1215,15 +1328,10 @@
 
 		if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
 			rc = mmc_suspend_host(mmc, state);
-		if (!rc) {
-			writel(0, host->base + MMCIMASK0);
-
-			if (host->clks_on) {
-				clk_disable(host->clk);
-				clk_disable(host->pclk);
-				host->clks_on = 0;
-			}
-		}
+		if (!rc)
+			msmsdcc_writel(host, 0, MMCIMASK0);
+		if (host->clks_on)
+			msmsdcc_disable_clocks(host, 0);
 	}
 	return rc;
 }
@@ -1232,27 +1340,21 @@
 msmsdcc_resume(struct platform_device *dev)
 {
 	struct mmc_host *mmc = mmc_get_drvdata(dev);
-	unsigned long flags;
 
 	if (mmc) {
 		struct msmsdcc_host *host = mmc_priv(mmc);
 
-		spin_lock_irqsave(&host->lock, flags);
+		msmsdcc_enable_clocks(host);
 
-		if (!host->clks_on) {
-			clk_enable(host->pclk);
-			clk_enable(host->clk);
-			host->clks_on = 1;
-		}
-
-		writel(host->saved_irq0mask, host->base + MMCIMASK0);
-
-		spin_unlock_irqrestore(&host->lock, flags);
+		msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0);
 
 		if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
 			mmc_resume_host(mmc);
 		if (host->stat_irq)
 			enable_irq(host->stat_irq);
+#if BUSCLK_PWRSAVE
+		msmsdcc_disable_clocks(host, 1);
+#endif
 	}
 	return 0;
 }
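
The msmsdcc_start_data() hunk above folds the card's data timeout (timeout_ns plus timeout_clks) into controller clock cycles before the value is written to MMCIDATATIMER for PIO, or stashed in cmd_timeout for the new DMA exec callback; the patched code also doubles the scaled value. Below is a minimal, stand-alone sketch of that arithmetic, not driver code: the clock rate and timeout are made-up example values, and the kernel itself uses do_div() for the 64-bit division.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Sketch of the timeout math in msmsdcc_start_data(): scale the card's
 * nanosecond timeout by the controller clock, double it as the patch does,
 * and add the card's extra timeout clocks.  Example values only. */
static unsigned int data_timeout_clocks(uint64_t timeout_ns,
					unsigned int timeout_clks,
					unsigned long clk_rate)
{
	uint64_t clks = timeout_ns * clk_rate;

	clks /= NSEC_PER_SEC;		/* the driver uses do_div() here */
	return timeout_clks + (unsigned int)clks * 2;
}

int main(void)
{
	/* 100 ms timeout, 1024 extra clocks, 48 MHz SD clock */
	printf("%u controller clocks\n",
	       data_timeout_clocks(100 * 1000 * 1000ULL, 1024, 48000000));
	return 0;
}
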
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index 8c84484..da0039c 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -171,6 +171,7 @@
 	int				channel;
 	struct msmsdcc_host		*host;
 	int				busy; /* Set if DM is busy */
+	int				active;
 };
 
 struct msmsdcc_pio_data {
@@ -213,7 +214,7 @@
 	struct clk		*clk;		/* main MMC bus clock */
 	struct clk		*pclk;		/* SDCC peripheral bus clock */
 	unsigned int		clks_on;	/* set if clocks are enabled */
-	struct timer_list	command_timer;
+	struct timer_list	busclk_timer;
 
 	unsigned int		eject;		/* eject state */
 
@@ -233,6 +234,18 @@
 	struct msmsdcc_pio_data	pio;
 	int			cmdpoll;
 	struct msmsdcc_stats	stats;
+
+#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
+	struct work_struct	resume_task;
+#endif
+
+	/* Command parameters */
+	unsigned int		cmd_timeout;
+	unsigned int		cmd_pio_irqmask;
+	unsigned int		cmd_datactrl;
+	struct mmc_command	*cmd_cmd;
+	u32			cmd_c;
+
 };
 
 #endif
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index bc72d6e..13343e8 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -40,6 +40,7 @@
 #include <linux/mutex.h>
 #include <linux/radix-tree.h>
 #include <linux/timer.h>
+#include <linux/semaphore.h>
 #include <linux/workqueue.h>
 
 #include <linux/mlx4/device.h>
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index c61a61f..6ce6ce1 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -2560,7 +2560,8 @@
 }
 
 static ssize_t
-netxen_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
+netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t offset, size_t size)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
@@ -2587,7 +2588,8 @@
 }
 
 static ssize_t
-netxen_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
+netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t offset, size_t size)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
@@ -2627,7 +2629,8 @@
 }
 
 static ssize_t
-netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
+netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t offset, size_t size)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
@@ -2647,7 +2650,7 @@
 	return size;
 }
 
-static ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
+static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf,
 		loff_t offset, size_t size)
 {
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 1003eb7..23ea9ca 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -2464,7 +2464,8 @@
 }
 
 static ssize_t
-qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t offset, size_t size)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
@@ -2488,7 +2489,8 @@
 }
 
 static ssize_t
-qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t offset, size_t size)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
@@ -2525,7 +2527,8 @@
 }
 
 static ssize_t
-qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t offset, size_t size)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
@@ -2546,7 +2549,8 @@
 }
 
 static ssize_t
-qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t offset, size_t size)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 6ecbfb2..e525263 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -108,7 +108,7 @@
 static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status);
 static void ibm_handle_events(acpi_handle handle, u32 event, void *context);
 static int ibm_get_table_from_acpi(char **bufp);
-static ssize_t ibm_read_apci_table(struct kobject *kobj,
+static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
 				   struct bin_attribute *bin_attr,
 				   char *buffer, loff_t pos, size_t size);
 static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
@@ -351,6 +351,7 @@
 
 /**
  * ibm_read_apci_table - callback for the sysfs apci_table file
+ * @filp: the open sysfs file
  * @kobj: the kobject this binary attribute is a part of
  * @bin_attr: struct bin_attribute for this file
  * @buffer: the kernel space buffer to fill
@@ -364,7 +365,7 @@
  * things get really tricky here...
  * our solution is to only allow reading the table in all at once.
  */
-static ssize_t ibm_read_apci_table(struct kobject *kobj,
+static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
 				   struct bin_attribute *bin_attr,
 				   char *buffer, loff_t pos, size_t size)
 {
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index fad9398..6309c5a 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -21,6 +21,7 @@
 #include <linux/stat.h>
 #include <linux/topology.h>
 #include <linux/mm.h>
+#include <linux/fs.h>
 #include <linux/capability.h>
 #include <linux/pci-aspm.h>
 #include <linux/slab.h>
@@ -357,7 +358,8 @@
 struct device_attribute vga_attr = __ATTR_RO(boot_vga);
 
 static ssize_t
-pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_read_config(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *bin_attr,
 		char *buf, loff_t off, size_t count)
 {
 	struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
@@ -366,7 +368,7 @@
 	u8 *data = (u8*) buf;
 
 	/* Several chips lock up trying to read undefined config space */
-	if (capable(CAP_SYS_ADMIN)) {
+	if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) {
 		size = dev->cfg_size;
 	} else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
 		size = 128;
@@ -430,7 +432,8 @@
 }
 
 static ssize_t
-pci_write_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_write_config(struct file* filp, struct kobject *kobj,
+		 struct bin_attribute *bin_attr,
 		 char *buf, loff_t off, size_t count)
 {
 	struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
@@ -487,7 +490,8 @@
 }
 
 static ssize_t
-read_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
+read_vpd_attr(struct file *filp, struct kobject *kobj,
+	      struct bin_attribute *bin_attr,
 	      char *buf, loff_t off, size_t count)
 {
 	struct pci_dev *dev =
@@ -502,7 +506,8 @@
 }
 
 static ssize_t
-write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
+write_vpd_attr(struct file *filp, struct kobject *kobj,
+	       struct bin_attribute *bin_attr,
 	       char *buf, loff_t off, size_t count)
 {
 	struct pci_dev *dev =
@@ -519,6 +524,7 @@
 #ifdef HAVE_PCI_LEGACY
 /**
  * pci_read_legacy_io - read byte(s) from legacy I/O port space
+ * @filp: open sysfs file
  * @kobj: kobject corresponding to file to read from
  * @bin_attr: struct bin_attribute for this file
  * @buf: buffer to store results
@@ -529,7 +535,8 @@
  * callback routine (pci_legacy_read).
  */
 static ssize_t
-pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_read_legacy_io(struct file *filp, struct kobject *kobj,
+		   struct bin_attribute *bin_attr,
 		   char *buf, loff_t off, size_t count)
 {
         struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -545,6 +552,7 @@
 
 /**
  * pci_write_legacy_io - write byte(s) to legacy I/O port space
+ * @filp: open sysfs file
  * @kobj: kobject corresponding to file to read from
  * @bin_attr: struct bin_attribute for this file
  * @buf: buffer containing value to be written
@@ -555,7 +563,8 @@
  * callback routine (pci_legacy_write).
  */
 static ssize_t
-pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_write_legacy_io(struct file *filp, struct kobject *kobj,
+		    struct bin_attribute *bin_attr,
 		    char *buf, loff_t off, size_t count)
 {
         struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -570,6 +579,7 @@
 
 /**
  * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
+ * @filp: open sysfs file
  * @kobj: kobject corresponding to device to be mapped
  * @attr: struct bin_attribute for this file
  * @vma: struct vm_area_struct passed to mmap
@@ -579,7 +589,8 @@
  * memory space.
  */
 static int
-pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
+		    struct bin_attribute *attr,
                     struct vm_area_struct *vma)
 {
         struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -591,6 +602,7 @@
 
 /**
  * pci_mmap_legacy_io - map legacy PCI IO into user memory space
+ * @filp: open sysfs file
  * @kobj: kobject corresponding to device to be mapped
  * @attr: struct bin_attribute for this file
  * @vma: struct vm_area_struct passed to mmap
@@ -600,7 +612,8 @@
  * memory space. Returns -ENOSYS if the operation isn't supported
  */
 static int
-pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
+		   struct bin_attribute *attr,
 		   struct vm_area_struct *vma)
 {
         struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -750,14 +763,16 @@
 }
 
 static int
-pci_mmap_resource_uc(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
+		     struct bin_attribute *attr,
 		     struct vm_area_struct *vma)
 {
 	return pci_mmap_resource(kobj, attr, vma, 0);
 }
 
 static int
-pci_mmap_resource_wc(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
+		     struct bin_attribute *attr,
 		     struct vm_area_struct *vma)
 {
 	return pci_mmap_resource(kobj, attr, vma, 1);
@@ -861,6 +876,7 @@
 
 /**
  * pci_write_rom - used to enable access to the PCI ROM display
+ * @filp: sysfs file
  * @kobj: kernel object handle
  * @bin_attr: struct bin_attribute for this file
  * @buf: user input
@@ -870,7 +886,8 @@
  * writing anything except 0 enables it
  */
 static ssize_t
-pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_write_rom(struct file *filp, struct kobject *kobj,
+	      struct bin_attribute *bin_attr,
 	      char *buf, loff_t off, size_t count)
 {
 	struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
@@ -885,6 +902,7 @@
 
 /**
  * pci_read_rom - read a PCI ROM
+ * @filp: sysfs file
  * @kobj: kernel object handle
  * @bin_attr: struct bin_attribute for this file
  * @buf: where to put the data we read from the ROM
@@ -895,7 +913,8 @@
  * device corresponding to @kobj.
  */
 static ssize_t
-pci_read_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_read_rom(struct file *filp, struct kobject *kobj,
+	     struct bin_attribute *bin_attr,
 	     char *buf, loff_t off, size_t count)
 {
 	struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
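
The bin_attribute read/write callbacks gain a struct file * argument throughout this series, which lets pci_read_config() above test CAP_SYS_ADMIN against the credentials of the process that opened the sysfs file (filp->f_cred) rather than the current caller. From user space the attribute is still just a file; the sketch below is not from the kernel tree and the device address is a placeholder. It pread()s the first 64 bytes of a device's config space, which is typically all an unprivileged reader gets for a standard header (128 bytes for CardBus, as in the hunk).

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder address; substitute a real one from "lspci -D". */
	const char *path = "/sys/bus/pci/devices/0000:00:00.0/config";
	unsigned char buf[64];
	ssize_t n, i;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Unprivileged reads stop at the standard header, as enforced
	 * in pci_read_config(). */
	n = pread(fd, buf, sizeof(buf), 0);
	close(fd);
	if (n < 0) {
		perror("pread");
		return 1;
	}
	for (i = 0; i < n; i++)
		printf("%02x%c", (unsigned)buf[i], (i % 16 == 15) ? '\n' : ' ');
	putchar('\n');
	return 0;
}
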
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 60d428b..8844bc3e 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -1531,7 +1531,7 @@
 }
 
 
-static ssize_t pccard_show_cis(struct kobject *kobj,
+static ssize_t pccard_show_cis(struct file *filp, struct kobject *kobj,
 			       struct bin_attribute *bin_attr,
 			       char *buf, loff_t off, size_t count)
 {
@@ -1562,7 +1562,7 @@
 }
 
 
-static ssize_t pccard_store_cis(struct kobject *kobj,
+static ssize_t pccard_store_cis(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *bin_attr,
 				char *buf, loff_t off, size_t count)
 {
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 8fefe5a..baefcf1 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -354,7 +354,7 @@
 #define EEPROM_END	0x80
 #define EEPROM_SIZE	(EEPROM_END - EEPROM_START)
 
-static ssize_t olpc_bat_eeprom_read(struct kobject *kobj,
+static ssize_t olpc_bat_eeprom_read(struct file *filp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf, loff_t off, size_t count)
 {
 	uint8_t ec_byte;
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index ba742e8..00b4756 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -68,7 +68,8 @@
 };
 
 static ssize_t
-rio_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+rio_read_config(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *bin_attr,
 		char *buf, loff_t off, size_t count)
 {
 	struct rio_dev *dev =
@@ -139,7 +140,8 @@
 }
 
 static ssize_t
-rio_write_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+rio_write_config(struct file *filp, struct kobject *kobj,
+		 struct bin_attribute *bin_attr,
 		 char *buf, loff_t off, size_t count)
 {
 	struct rio_dev *dev =
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index e9aa814..96e8e70 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -238,31 +238,32 @@
 	rtc_control = CMOS_READ(RTC_CONTROL);
 	spin_unlock_irq(&rtc_lock);
 
-	/* REVISIT this assumes PC style usage:  always BCD */
-
-	if (((unsigned)t->time.tm_sec) < 0x60)
-		t->time.tm_sec = bcd2bin(t->time.tm_sec);
-	else
-		t->time.tm_sec = -1;
-	if (((unsigned)t->time.tm_min) < 0x60)
-		t->time.tm_min = bcd2bin(t->time.tm_min);
-	else
-		t->time.tm_min = -1;
-	if (((unsigned)t->time.tm_hour) < 0x24)
-		t->time.tm_hour = bcd2bin(t->time.tm_hour);
-	else
-		t->time.tm_hour = -1;
-
-	if (cmos->day_alrm) {
-		if (((unsigned)t->time.tm_mday) <= 0x31)
-			t->time.tm_mday = bcd2bin(t->time.tm_mday);
+	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+		if (((unsigned)t->time.tm_sec) < 0x60)
+			t->time.tm_sec = bcd2bin(t->time.tm_sec);
 		else
-			t->time.tm_mday = -1;
-		if (cmos->mon_alrm) {
-			if (((unsigned)t->time.tm_mon) <= 0x12)
-				t->time.tm_mon = bcd2bin(t->time.tm_mon) - 1;
+			t->time.tm_sec = -1;
+		if (((unsigned)t->time.tm_min) < 0x60)
+			t->time.tm_min = bcd2bin(t->time.tm_min);
+		else
+			t->time.tm_min = -1;
+		if (((unsigned)t->time.tm_hour) < 0x24)
+			t->time.tm_hour = bcd2bin(t->time.tm_hour);
+		else
+			t->time.tm_hour = -1;
+
+		if (cmos->day_alrm) {
+			if (((unsigned)t->time.tm_mday) <= 0x31)
+				t->time.tm_mday = bcd2bin(t->time.tm_mday);
 			else
-				t->time.tm_mon = -1;
+				t->time.tm_mday = -1;
+
+			if (cmos->mon_alrm) {
+				if (((unsigned)t->time.tm_mon) <= 0x12)
+					t->time.tm_mon = bcd2bin(t->time.tm_mon)-1;
+				else
+					t->time.tm_mon = -1;
+			}
 		}
 	}
 	t->time.tm_year = -1;
@@ -322,29 +323,26 @@
 static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
 {
 	struct cmos_rtc	*cmos = dev_get_drvdata(dev);
-	unsigned char	mon, mday, hrs, min, sec;
+       unsigned char   mon, mday, hrs, min, sec, rtc_control;
 
 	if (!is_valid_irq(cmos->irq))
 		return -EIO;
 
-	/* REVISIT this assumes PC style usage:  always BCD */
-
-	/* Writing 0xff means "don't care" or "match all".  */
-
 	mon = t->time.tm_mon + 1;
-	mon = (mon <= 12) ? bin2bcd(mon) : 0xff;
-
 	mday = t->time.tm_mday;
-	mday = (mday >= 1 && mday <= 31) ? bin2bcd(mday) : 0xff;
-
 	hrs = t->time.tm_hour;
-	hrs = (hrs < 24) ? bin2bcd(hrs) : 0xff;
-
 	min = t->time.tm_min;
-	min = (min < 60) ? bin2bcd(min) : 0xff;
-
 	sec = t->time.tm_sec;
-	sec = (sec < 60) ? bin2bcd(sec) : 0xff;
+
+	rtc_control = CMOS_READ(RTC_CONTROL);
+	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+		/* Writing 0xff means "don't care" or "match all".  */
+		mon = (mon <= 12) ? bin2bcd(mon) : 0xff;
+		mday = (mday >= 1 && mday <= 31) ? bin2bcd(mday) : 0xff;
+		hrs = (hrs < 24) ? bin2bcd(hrs) : 0xff;
+		min = (min < 60) ? bin2bcd(min) : 0xff;
+		sec = (sec < 60) ? bin2bcd(sec) : 0xff;
+	}
 
 	spin_lock_irq(&rtc_lock);
 
@@ -478,7 +476,7 @@
 			"update_IRQ\t: %s\n"
 			"HPET_emulated\t: %s\n"
 			// "square_wave\t: %s\n"
-			// "BCD\t\t: %s\n"
+			"BCD\t\t: %s\n"
 			"DST_enable\t: %s\n"
 			"periodic_freq\t: %d\n"
 			"batt_status\t: %s\n",
@@ -486,7 +484,7 @@
 			(rtc_control & RTC_UIE) ? "yes" : "no",
 			is_hpet_enabled() ? "yes" : "no",
 			// (rtc_control & RTC_SQWE) ? "yes" : "no",
-			// (rtc_control & RTC_DM_BINARY) ? "no" : "yes",
+			(rtc_control & RTC_DM_BINARY) ? "no" : "yes",
 			(rtc_control & RTC_DST_EN) ? "yes" : "no",
 			cmos->rtc->irq_freq,
 			(valid & RTC_VRT) ? "okay" : "dead");
@@ -519,7 +517,8 @@
 #define NVRAM_OFFSET	(RTC_REG_D + 1)
 
 static ssize_t
-cmos_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
+cmos_nvram_read(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t off, size_t count)
 {
 	int	retval;
@@ -547,7 +546,8 @@
 }
 
 static ssize_t
-cmos_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
+cmos_nvram_write(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t off, size_t count)
 {
 	struct cmos_rtc	*cmos;
@@ -749,12 +749,11 @@
 
 	spin_unlock_irq(&rtc_lock);
 
-	/* FIXME teach the alarm code how to handle binary mode;
+	/* FIXME:
 	 * <asm-generic/rtc.h> doesn't know 12-hour mode either.
 	 */
-	if (is_valid_irq(rtc_irq) &&
-	    (!(rtc_control & RTC_24H) || (rtc_control & (RTC_DM_BINARY)))) {
-		dev_dbg(dev, "only 24-hr BCD mode supported\n");
+       if (is_valid_irq(rtc_irq) && !(rtc_control & RTC_24H)) {
+		dev_warn(dev, "only 24-hr supported\n");
 		retval = -ENXIO;
 		goto cleanup1;
 	}
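
The rewritten cmos_read_alarm()/cmos_set_alarm() hunks above only convert between BCD and binary when the RTC is not in binary mode (RTC_DM_BINARY clear, or RTC_ALWAYS_BCD). The conversions themselves are the kernel's bcd2bin()/bin2bcd() helpers; the following stand-alone sketch shows what those helpers compute, purely for reference.

#include <stdio.h>

/* Same conversions the kernel's bcd2bin()/bin2bcd() helpers perform. */
static unsigned char bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd(unsigned char val)
{
	return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
	unsigned char sec = 0x59;	/* 59 seconds, BCD-encoded */

	printf("bcd 0x%02x -> %u\n", sec, bcd2bin(sec));
	printf("bin 59    -> 0x%02x\n", bin2bcd(59));
	return 0;
}
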
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 7836c9c..48da85e 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -542,7 +542,8 @@
 }
 
 static ssize_t
-ds1305_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
+ds1305_nvram_read(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t off, size_t count)
 {
 	struct spi_device	*spi;
@@ -572,7 +573,8 @@
 }
 
 static ssize_t
-ds1305_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
+ds1305_nvram_write(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t off, size_t count)
 {
 	struct spi_device	*spi;
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index c4ec5c1..de033b7 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -556,7 +556,8 @@
 #define NVRAM_SIZE	56
 
 static ssize_t
-ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
+ds1307_nvram_read(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t off, size_t count)
 {
 	struct i2c_client	*client;
@@ -580,7 +581,8 @@
 }
 
 static ssize_t
-ds1307_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
+ds1307_nvram_write(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t off, size_t count)
 {
 	struct i2c_client	*client;
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 06b8566..37268e9 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -423,8 +423,9 @@
 };
 
  static ssize_t
-ds1511_nvram_read(struct kobject *kobj, struct bin_attribute *ba,
-				char *buf, loff_t pos, size_t size)
+ds1511_nvram_read(struct file *filp, struct kobject *kobj,
+		  struct bin_attribute *ba,
+		  char *buf, loff_t pos, size_t size)
 {
 	ssize_t count;
 
@@ -452,8 +453,9 @@
 }
 
  static ssize_t
-ds1511_nvram_write(struct kobject *kobj, struct bin_attribute *bin_attr,
-				char *buf, loff_t pos, size_t size)
+ds1511_nvram_write(struct file *filp, struct kobject *kobj,
+		   struct bin_attribute *bin_attr,
+		   char *buf, loff_t pos, size_t size)
 {
 	ssize_t count;
 
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 244f999..ff432e2 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -252,7 +252,7 @@
 	.update_irq_enable	= ds1553_rtc_update_irq_enable,
 };
 
-static ssize_t ds1553_nvram_read(struct kobject *kobj,
+static ssize_t ds1553_nvram_read(struct file *filp, struct kobject *kobj,
 				 struct bin_attribute *bin_attr,
 				 char *buf, loff_t pos, size_t size)
 {
@@ -267,7 +267,7 @@
 	return count;
 }
 
-static ssize_t ds1553_nvram_write(struct kobject *kobj,
+static ssize_t ds1553_nvram_write(struct file *filp, struct kobject *kobj,
 				  struct bin_attribute *bin_attr,
 				  char *buf, loff_t pos, size_t size)
 {
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 2b4b0bc..042630c 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -128,7 +128,7 @@
 	.set_time	= ds1742_rtc_set_time,
 };
 
-static ssize_t ds1742_nvram_read(struct kobject *kobj,
+static ssize_t ds1742_nvram_read(struct file *filp, struct kobject *kobj,
 				 struct bin_attribute *bin_attr,
 				 char *buf, loff_t pos, size_t size)
 {
@@ -143,7 +143,7 @@
 	return count;
 }
 
-static ssize_t ds1742_nvram_write(struct kobject *kobj,
+static ssize_t ds1742_nvram_write(struct file *filp, struct kobject *kobj,
 				  struct bin_attribute *bin_attr,
 				  char *buf, loff_t pos, size_t size)
 {
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 365ff3a..be8359f 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -343,7 +343,7 @@
 	.set_time	= m48t59_rtc_set_time,
 };
 
-static ssize_t m48t59_nvram_read(struct kobject *kobj,
+static ssize_t m48t59_nvram_read(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *bin_attr,
 				char *buf, loff_t pos, size_t size)
 {
@@ -363,7 +363,7 @@
 	return cnt;
 }
 
-static ssize_t m48t59_nvram_write(struct kobject *kobj,
+static ssize_t m48t59_nvram_write(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *bin_attr,
 				char *buf, loff_t pos, size_t size)
 {
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index b53a001..3b94367 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -244,7 +244,7 @@
 	.alarm_irq_enable	= stk17ta8_rtc_alarm_irq_enable,
 };
 
-static ssize_t stk17ta8_nvram_read(struct kobject *kobj,
+static ssize_t stk17ta8_nvram_read(struct file *filp, struct kobject *kobj,
 				 struct bin_attribute *attr, char *buf,
 				 loff_t pos, size_t size)
 {
@@ -259,7 +259,7 @@
 	return count;
 }
 
-static ssize_t stk17ta8_nvram_write(struct kobject *kobj,
+static ssize_t stk17ta8_nvram_write(struct file *filp, struct kobject *kobj,
 				  struct bin_attribute *attr, char *buf,
 				  loff_t pos, size_t size)
 {
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 20bfc64..ec6313d 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -188,7 +188,7 @@
 	.alarm_irq_enable	= tx4939_rtc_alarm_irq_enable,
 };
 
-static ssize_t tx4939_rtc_nvram_read(struct kobject *kobj,
+static ssize_t tx4939_rtc_nvram_read(struct file *filp, struct kobject *kobj,
 				     struct bin_attribute *bin_attr,
 				     char *buf, loff_t pos, size_t size)
 {
@@ -207,7 +207,7 @@
 	return count;
 }
 
-static ssize_t tx4939_rtc_nvram_write(struct kobject *kobj,
+static ssize_t tx4939_rtc_nvram_write(struct file *filp, struct kobject *kobj,
 				      struct bin_attribute *bin_attr,
 				      char *buf, loff_t pos, size_t size)
 {
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 1d16189..6c9fa15 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -135,7 +135,8 @@
 /*
  * Channel measurement related functions
  */
-static ssize_t chp_measurement_chars_read(struct kobject *kobj,
+static ssize_t chp_measurement_chars_read(struct file *filp,
+					  struct kobject *kobj,
 					  struct bin_attribute *bin_attr,
 					  char *buf, loff_t off, size_t count)
 {
@@ -182,7 +183,7 @@
 	} while (reference_buf.values[0] != buf->values[0]);
 }
 
-static ssize_t chp_measurement_read(struct kobject *kobj,
+static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
 				    struct bin_attribute *bin_attr,
 				    char *buf, loff_t off, size_t count)
 {
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 54c5ffb..d38000d 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -98,7 +98,7 @@
 /* Functions */
 
 /* This function returns AENs through sysfs */
-static ssize_t twl_sysfs_aen_read(struct kobject *kobj,
+static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj,
 				  struct bin_attribute *bin_attr,
 				  char *outbuf, loff_t offset, size_t count)
 {
@@ -129,7 +129,7 @@
 };
 
 /* This function returns driver compatibility info through sysfs */
-static ssize_t twl_sysfs_compat_info(struct kobject *kobj,
+static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj,
 				     struct bin_attribute *bin_attr,
 				     char *outbuf, loff_t offset, size_t count)
 {
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 5877f29..a4e04c5 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -59,7 +59,8 @@
 
 struct device_attribute *arcmsr_host_attrs[];
 
-static ssize_t arcmsr_sysfs_iop_message_read(struct kobject *kobj,
+static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
+					     struct kobject *kobj,
 					     struct bin_attribute *bin,
 					     char *buf, loff_t off,
 					     size_t count)
@@ -105,7 +106,8 @@
 	return (allxfer_len);
 }
 
-static ssize_t arcmsr_sysfs_iop_message_write(struct kobject *kobj,
+static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
+					      struct kobject *kobj,
 					      struct bin_attribute *bin,
 					      char *buf, loff_t off,
 					      size_t count)
@@ -153,7 +155,8 @@
 	}
 }
 
-static ssize_t arcmsr_sysfs_iop_message_clear(struct kobject *kobj,
+static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp,
+					      struct kobject *kobj,
 					      struct bin_attribute *bin,
 					      char *buf, loff_t off,
 					      size_t count)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index d18f45c..3eb2b7b 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2919,6 +2919,7 @@
 #ifdef CONFIG_SCSI_IBMVFC_TRACE
 /**
  * ibmvfc_read_trace - Dump the adapter trace
+ * @filp:		open sysfs file
  * @kobj:		kobject struct
  * @bin_attr:	bin_attribute struct
  * @buf:		buffer
@@ -2928,7 +2929,7 @@
  * Return value:
  *	number of bytes printed to buffer
  **/
-static ssize_t ibmvfc_read_trace(struct kobject *kobj,
+static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
 				 struct bin_attribute *bin_attr,
 				 char *buf, loff_t off, size_t count)
 {
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b90c118..6a6661c 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3120,6 +3120,7 @@
 #ifdef CONFIG_SCSI_IPR_TRACE
 /**
  * ipr_read_trace - Dump the adapter trace
+ * @filp:		open sysfs file
  * @kobj:		kobject struct
  * @bin_attr:		bin_attribute struct
  * @buf:		buffer
@@ -3129,7 +3130,7 @@
  * Return value:
  *	number of bytes printed to buffer
  **/
-static ssize_t ipr_read_trace(struct kobject *kobj,
+static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
 			      struct bin_attribute *bin_attr,
 			      char *buf, loff_t off, size_t count)
 {
@@ -3764,6 +3765,7 @@
 #ifdef CONFIG_SCSI_IPR_DUMP
 /**
  * ipr_read_dump - Dump the adapter
+ * @filp:		open sysfs file
  * @kobj:		kobject struct
  * @bin_attr:		bin_attribute struct
  * @buf:		buffer
@@ -3773,7 +3775,7 @@
  * Return value:
  *	number of bytes printed to buffer
  **/
-static ssize_t ipr_read_dump(struct kobject *kobj,
+static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
 			     struct bin_attribute *bin_attr,
 			     char *buf, loff_t off, size_t count)
 {
@@ -3927,6 +3929,7 @@
 
 /**
  * ipr_write_dump - Setup dump state of adapter
+ * @filp:		open sysfs file
  * @kobj:		kobject struct
  * @bin_attr:		bin_attribute struct
  * @buf:		buffer
@@ -3936,7 +3939,7 @@
  * Return value:
  *	number of bytes printed to buffer
  **/
-static ssize_t ipr_write_dump(struct kobject *kobj,
+static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
 			      struct bin_attribute *bin_attr,
 			      char *buf, loff_t off, size_t count)
 {
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7608310..bf55d30 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -601,7 +601,7 @@
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
 	write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
 
-	if (sk_sleep(sock->sk) && waitqueue_active(sk_sleep(sock->sk))) {
+	if (sk_sleep(sock->sk)) {
 		sock->sk->sk_err = EIO;
 		wake_up_interruptible(sk_sleep(sock->sk));
 	}
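
The iscsi_tcp.c hunk above stops gating the error wake-up on waitqueue_active(), so a task that has already checked the condition but not yet gone to sleep cannot miss it. Below is a loose user-space analogue of the safe pattern (signal unconditionally, let the waiter re-check the condition) using POSIX threads; it only illustrates the idea and is not the socket code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool error_pending;

static void *waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!error_pending)		/* re-check after every wakeup */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("waiter saw the error\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	sleep(1);
	pthread_mutex_lock(&lock);
	error_pending = true;		/* like setting sk->sk_err = EIO */
	pthread_cond_signal(&cond);	/* signal unconditionally */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
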
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 2e5f376..bf33b31 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2643,6 +2643,7 @@
 
 /**
  * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
+ * @filp: sysfs file
  * @kobj: Pointer to the kernel object
  * @bin_attr: Attribute object
  * @buff: Buffer pointer
@@ -2654,7 +2655,8 @@
  * applications.
  **/
 static ssize_t
-sysfs_drvr_stat_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *bin_attr,
 		char *buf, loff_t off, size_t count)
 {
 	struct device *dev = container_of(kobj, struct device,
@@ -3362,6 +3364,7 @@
 
 /**
  * sysfs_ctlreg_write - Write method for writing to ctlreg
+ * @filp: open sysfs file
  * @kobj: kernel kobject that contains the kernel class device.
  * @bin_attr: kernel attributes passed to us.
  * @buf: contains the data to be written to the adapter IOREG space.
@@ -3379,7 +3382,8 @@
  * value of count, buf contents written
  **/
 static ssize_t
-sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
+		   struct bin_attribute *bin_attr,
 		   char *buf, loff_t off, size_t count)
 {
 	size_t buf_off;
@@ -3415,6 +3419,7 @@
 
 /**
  * sysfs_ctlreg_read - Read method for reading from ctlreg
+ * @filp: open sysfs file
  * @kobj: kernel kobject that contains the kernel class device.
  * @bin_attr: kernel attributes passed to us.
  * @buf: if successful contains the data from the adapter IOREG space.
@@ -3431,7 +3436,8 @@
  * value of count, buf contents read
  **/
 static ssize_t
-sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
+		  struct bin_attribute *bin_attr,
 		  char *buf, loff_t off, size_t count)
 {
 	size_t buf_off;
@@ -3496,6 +3502,7 @@
 
 /**
  * sysfs_mbox_write - Write method for writing information via mbox
+ * @filp: open sysfs file
  * @kobj: kernel kobject that contains the kernel class device.
  * @bin_attr: kernel attributes passed to us.
  * @buf: contains the data to be written to sysfs mbox.
@@ -3516,7 +3523,8 @@
  * count number of bytes transferred
  **/
 static ssize_t
-sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+sysfs_mbox_write(struct file *filp, struct kobject *kobj,
+		 struct bin_attribute *bin_attr,
 		 char *buf, loff_t off, size_t count)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
@@ -3571,6 +3579,7 @@
 
 /**
  * sysfs_mbox_read - Read method for reading information via mbox
+ * @filp: open sysfs file
  * @kobj: kernel kobject that contains the kernel class device.
  * @bin_attr: kernel attributes passed to us.
  * @buf: contains the data to be read from sysfs mbox.
@@ -3593,7 +3602,8 @@
  * count number of bytes transferred
  **/
 static ssize_t
-sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+sysfs_mbox_read(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *bin_attr,
 		char *buf, loff_t off, size_t count)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 3b70860..1e4cafa 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -16,7 +16,7 @@
 /* SYSFS attributes --------------------------------------------------------- */
 
 static ssize_t
-qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
+qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
 			   struct bin_attribute *bin_attr,
 			   char *buf, loff_t off, size_t count)
 {
@@ -32,7 +32,7 @@
 }
 
 static ssize_t
-qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
+qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
 			    struct bin_attribute *bin_attr,
 			    char *buf, loff_t off, size_t count)
 {
@@ -92,7 +92,7 @@
 };
 
 static ssize_t
-qla2x00_sysfs_read_nvram(struct kobject *kobj,
+qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
 			 struct bin_attribute *bin_attr,
 			 char *buf, loff_t off, size_t count)
 {
@@ -111,7 +111,7 @@
 }
 
 static ssize_t
-qla2x00_sysfs_write_nvram(struct kobject *kobj,
+qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
 			  struct bin_attribute *bin_attr,
 			  char *buf, loff_t off, size_t count)
 {
@@ -177,7 +177,7 @@
 };
 
 static ssize_t
-qla2x00_sysfs_read_optrom(struct kobject *kobj,
+qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
 			  struct bin_attribute *bin_attr,
 			  char *buf, loff_t off, size_t count)
 {
@@ -193,7 +193,7 @@
 }
 
 static ssize_t
-qla2x00_sysfs_write_optrom(struct kobject *kobj,
+qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
 			   struct bin_attribute *bin_attr,
 			   char *buf, loff_t off, size_t count)
 {
@@ -224,7 +224,7 @@
 };
 
 static ssize_t
-qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
+qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 			       struct bin_attribute *bin_attr,
 			       char *buf, loff_t off, size_t count)
 {
@@ -387,7 +387,7 @@
 };
 
 static ssize_t
-qla2x00_sysfs_read_vpd(struct kobject *kobj,
+qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
 		       struct bin_attribute *bin_attr,
 		       char *buf, loff_t off, size_t count)
 {
@@ -408,7 +408,7 @@
 }
 
 static ssize_t
-qla2x00_sysfs_write_vpd(struct kobject *kobj,
+qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
 			struct bin_attribute *bin_attr,
 			char *buf, loff_t off, size_t count)
 {
@@ -461,7 +461,7 @@
 };
 
 static ssize_t
-qla2x00_sysfs_read_sfp(struct kobject *kobj,
+qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
 		       struct bin_attribute *bin_attr,
 		       char *buf, loff_t off, size_t count)
 {
@@ -522,7 +522,7 @@
 };
 
 static ssize_t
-qla2x00_sysfs_write_reset(struct kobject *kobj,
+qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
 			struct bin_attribute *bin_attr,
 			char *buf, loff_t off, size_t count)
 {
@@ -592,7 +592,7 @@
 };
 
 static ssize_t
-qla2x00_sysfs_write_edc(struct kobject *kobj,
+qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
 			struct bin_attribute *bin_attr,
 			char *buf, loff_t off, size_t count)
 {
@@ -650,7 +650,7 @@
 };
 
 static ssize_t
-qla2x00_sysfs_write_edc_status(struct kobject *kobj,
+qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
 			struct bin_attribute *bin_attr,
 			char *buf, loff_t off, size_t count)
 {
@@ -700,7 +700,7 @@
 }
 
 static ssize_t
-qla2x00_sysfs_read_edc_status(struct kobject *kobj,
+qla2x00_sysfs_read_edc_status(struct file *filp, struct kobject *kobj,
 			   struct bin_attribute *bin_attr,
 			   char *buf, loff_t off, size_t count)
 {
@@ -730,7 +730,7 @@
 };
 
 static ssize_t
-qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
+qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
 		       struct bin_attribute *bin_attr,
 		       char *buf, loff_t off, size_t count)
 {
@@ -782,7 +782,7 @@
 };
 
 static ssize_t
-qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
+qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
 		       struct bin_attribute *bin_attr,
 		       char *buf, loff_t off, size_t count)
 {
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 2b1ea3d..891e1dd 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1891,8 +1891,8 @@
 	struct uart_8250_port *up = (struct uart_8250_port *)port;
 	unsigned char lsr = serial_inp(up, UART_LSR);
 
-	while (!(lsr & UART_LSR_DR))
-		lsr = serial_inp(up, UART_LSR);
+	if (!(lsr & UART_LSR_DR))
+		return NO_POLL_CHAR;
 
 	return serial_inp(up, UART_RX);
 }
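
The 8250.c change above makes the polling hook return NO_POLL_CHAR when no byte is pending instead of spinning on UART_LSR_DR. The sketch below is a rough user-space analogue of the same idea, reporting "no data" from a non-blocking descriptor rather than busy-waiting; the sentinel name merely mirrors the kernel one and its value here is arbitrary.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>

#define NO_POLL_CHAR (-1)	/* local sentinel, not the kernel's value */

/* Return the next byte from fd, or NO_POLL_CHAR if none is ready,
 * instead of spinning until data arrives. */
static int poll_get_char(int fd)
{
	unsigned char c;
	ssize_t n = read(fd, &c, 1);

	if (n == 1)
		return c;
	return NO_POLL_CHAR;	/* nothing pending (or EOF/EAGAIN) */
}

int main(void)
{
	int flags = fcntl(STDIN_FILENO, F_GETFL);

	fcntl(STDIN_FILENO, F_SETFL, flags | O_NONBLOCK);
	printf("poll result: %d\n", poll_get_char(STDIN_FILENO));
	return 0;
}
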
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 302836a..8b23165 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1423,8 +1423,8 @@
 	  Support for Console on SC2681/SC2692 serial ports.
 
 config SERIAL_BFIN_SPORT
-	tristate "Blackfin SPORT emulate UART (EXPERIMENTAL)"
-	depends on BLACKFIN && EXPERIMENTAL
+	tristate "Blackfin SPORT emulate UART"
+	depends on BLACKFIN
 	select SERIAL_CORE
 	help
 	  Enable SPORT emulate UART on Blackfin series.
@@ -1439,28 +1439,52 @@
 
 config SERIAL_BFIN_SPORT0_UART
 	bool "Enable UART over SPORT0"
-	depends on SERIAL_BFIN_SPORT && !(BF542 || BF542M || BF544 || BF544M)
+	depends on SERIAL_BFIN_SPORT && !(BF542 || BF544)
 	help
 	  Enable UART over SPORT0
 
+config SERIAL_BFIN_SPORT0_UART_CTSRTS
+	bool "Enable UART over SPORT0 hardware flow control"
+	depends on SERIAL_BFIN_SPORT0_UART
+	help
+	  Enable hardware flow control in the driver.
+
 config SERIAL_BFIN_SPORT1_UART
 	bool "Enable UART over SPORT1"
 	depends on SERIAL_BFIN_SPORT
 	help
 	  Enable UART over SPORT1
 
+config SERIAL_BFIN_SPORT1_UART_CTSRTS
+	bool "Enable UART over SPORT1 hardware flow control"
+	depends on SERIAL_BFIN_SPORT1_UART
+	help
+	  Enable hardware flow control in the driver.
+
 config SERIAL_BFIN_SPORT2_UART
 	bool "Enable UART over SPORT2"
 	depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
 	help
 	  Enable UART over SPORT2
 
+config SERIAL_BFIN_SPORT2_UART_CTSRTS
+	bool "Enable UART over SPORT2 hardware flow control"
+	depends on SERIAL_BFIN_SPORT2_UART
+	help
+	  Enable hardware flow control in the driver.
+
 config SERIAL_BFIN_SPORT3_UART
 	bool "Enable UART over SPORT3"
 	depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
 	help
 	  Enable UART over SPORT3
 
+config SERIAL_BFIN_SPORT3_UART_CTSRTS
+	bool "Enable UART over SPORT3 hardware flow control"
+	depends on SERIAL_BFIN_SPORT3_UART
+	help
+	  Enable hardware flow control in the driver.
+
 config SERIAL_TIMBERDALE
 	tristate "Support for timberdale UART"
 	select SERIAL_CORE
@@ -1499,4 +1523,56 @@
 	help
 	Support for running a console on the GRLIB APBUART
 
+config SERIAL_ALTERA_JTAGUART
+	tristate "Altera JTAG UART support"
+	select SERIAL_CORE
+	help
+	  This driver supports the Altera JTAG UART port.
+
+config SERIAL_ALTERA_JTAGUART_CONSOLE
+	bool "Altera JTAG UART console support"
+	depends on SERIAL_ALTERA_JTAGUART=y
+	select SERIAL_CORE_CONSOLE
+	help
+	  Enable an Altera JTAG UART port to be the system console.
+
+config SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS
+	bool "Bypass output when no connection"
+	depends on SERIAL_ALTERA_JTAGUART_CONSOLE
+	select SERIAL_CORE_CONSOLE
+	help
+	  Bypass console output and keep going even if there is no
+	  JTAG terminal connection with the host.
+
+config SERIAL_ALTERA_UART
+	tristate "Altera UART support"
+	select SERIAL_CORE
+	help
+	  This driver supports the Altera softcore UART port.
+
+config SERIAL_ALTERA_UART_MAXPORTS
+	int "Maximum number of Altera UART ports"
+	depends on SERIAL_ALTERA_UART
+	default 4
+	help
+	  This setting lets you define the maximum number of the Altera
+	  UART ports. The usual default varies from board to board, and
+	  this setting is a way of catering for that.
+
+config SERIAL_ALTERA_UART_BAUDRATE
+	int "Default baudrate for Altera UART ports"
+	depends on SERIAL_ALTERA_UART
+	default 115200
+	help
+	  This setting lets you define what the default baudrate is for the
+	  Altera UART ports. The usual default varies from board to board,
+	  and this setting is a way of catering for that.
+
+config SERIAL_ALTERA_UART_CONSOLE
+	bool "Altera UART console support"
+	depends on SERIAL_ALTERA_UART=y
+	select SERIAL_CORE_CONSOLE
+	help
+	  Enable an Altera UART port to be the system console.
+
 endmenu
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 328f107..208a855 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -82,3 +82,5 @@
 obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
 obj-$(CONFIG_SERIAL_TIMBERDALE)	+= timbuart.o
 obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
+obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
+obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
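
The new altera_jtaguart.c added below packs the transmit FIFO space into the top 16 bits of the JTAG UART control register (ALTERA_JTAGUART_CONTROL_WSPACE_MSK / _OFF). The tiny sketch that follows shows extracting that field from a raw register value; the register value itself is a made-up example.

#include <stdio.h>
#include <stdint.h>

#define CONTROL_WSPACE_MSK	0xFFFF0000u	/* mirrors ALTERA_JTAGUART_CONTROL_WSPACE_MSK */
#define CONTROL_WSPACE_OFF	16

int main(void)
{
	uint32_t control = 0x00400001;	/* example: 64 bytes of TX space, RE set */

	printf("tx space: %u bytes\n",
	       (unsigned)((control & CONTROL_WSPACE_MSK) >> CONTROL_WSPACE_OFF));
	return 0;
}
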
diff --git a/drivers/serial/altera_jtaguart.c b/drivers/serial/altera_jtaguart.c
new file mode 100644
index 0000000..f9b49b5
--- /dev/null
+++ b/drivers/serial/altera_jtaguart.c
@@ -0,0 +1,504 @@
+/*
+ * altera_jtaguart.c -- Altera JTAG UART driver
+ *
+ * Based on mcf.c -- Freescale ColdFire UART driver
+ *
+ * (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
+ * (C) Copyright 2008, Thomas Chou <thomas@wytron.com.tw>
+ * (C) Copyright 2010, Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/altera_jtaguart.h>
+
+#define DRV_NAME "altera_jtaguart"
+
+/*
+ * Altera JTAG UART register definitions according to the Altera JTAG UART
+ * datasheet: http://www.altera.com/literature/hb/nios2/n2cpu_nii51009.pdf
+ */
+
+#define ALTERA_JTAGUART_SIZE			8
+
+#define ALTERA_JTAGUART_DATA_REG		0
+
+#define ALTERA_JTAGUART_DATA_DATA_MSK		0x000000FF
+#define ALTERA_JTAGUART_DATA_RVALID_MSK		0x00008000
+#define ALTERA_JTAGUART_DATA_RAVAIL_MSK		0xFFFF0000
+#define ALTERA_JTAGUART_DATA_RAVAIL_OFF		16
+
+#define ALTERA_JTAGUART_CONTROL_REG		4
+
+#define ALTERA_JTAGUART_CONTROL_RE_MSK		0x00000001
+#define ALTERA_JTAGUART_CONTROL_WE_MSK		0x00000002
+#define ALTERA_JTAGUART_CONTROL_RI_MSK		0x00000100
+#define ALTERA_JTAGUART_CONTROL_RI_OFF		8
+#define ALTERA_JTAGUART_CONTROL_WI_MSK		0x00000200
+#define ALTERA_JTAGUART_CONTROL_AC_MSK		0x00000400
+#define ALTERA_JTAGUART_CONTROL_WSPACE_MSK	0xFFFF0000
+#define ALTERA_JTAGUART_CONTROL_WSPACE_OFF	16
+
+/*
+ * Local per-uart structure.
+ */
+struct altera_jtaguart {
+	struct uart_port port;
+	unsigned int sigs;	/* Local copy of line sigs */
+	unsigned long imr;	/* Local IMR mirror */
+};
+
+static unsigned int altera_jtaguart_tx_empty(struct uart_port *port)
+{
+	return (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
+		ALTERA_JTAGUART_CONTROL_WSPACE_MSK) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int altera_jtaguart_get_mctrl(struct uart_port *port)
+{
+	return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+}
+
+static void altera_jtaguart_set_mctrl(struct uart_port *port, unsigned int sigs)
+{
+}
+
+static void altera_jtaguart_start_tx(struct uart_port *port)
+{
+	struct altera_jtaguart *pp =
+	    container_of(port, struct altera_jtaguart, port);
+
+	pp->imr |= ALTERA_JTAGUART_CONTROL_WE_MSK;
+	writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static void altera_jtaguart_stop_tx(struct uart_port *port)
+{
+	struct altera_jtaguart *pp =
+	    container_of(port, struct altera_jtaguart, port);
+
+	pp->imr &= ~ALTERA_JTAGUART_CONTROL_WE_MSK;
+	writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static void altera_jtaguart_stop_rx(struct uart_port *port)
+{
+	struct altera_jtaguart *pp =
+	    container_of(port, struct altera_jtaguart, port);
+
+	pp->imr &= ~ALTERA_JTAGUART_CONTROL_RE_MSK;
+	writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static void altera_jtaguart_break_ctl(struct uart_port *port, int break_state)
+{
+}
+
+static void altera_jtaguart_enable_ms(struct uart_port *port)
+{
+}
+
+static void altera_jtaguart_set_termios(struct uart_port *port,
+					struct ktermios *termios,
+					struct ktermios *old)
+{
+	/* Just copy the old termios settings back */
+	if (old)
+		tty_termios_copy_hw(termios, old);
+}
+
+static void altera_jtaguart_rx_chars(struct altera_jtaguart *pp)
+{
+	struct uart_port *port = &pp->port;
+	unsigned char ch, flag;
+	unsigned long status;
+
+	while ((status = readl(port->membase + ALTERA_JTAGUART_DATA_REG)) &
+	       ALTERA_JTAGUART_DATA_RVALID_MSK) {
+		ch = status & ALTERA_JTAGUART_DATA_DATA_MSK;
+		flag = TTY_NORMAL;
+		port->icount.rx++;
+
+		if (uart_handle_sysrq_char(port, ch))
+			continue;
+		uart_insert_char(port, 0, 0, ch, flag);
+	}
+
+	tty_flip_buffer_push(port->state->port.tty);
+}
+
+static void altera_jtaguart_tx_chars(struct altera_jtaguart *pp)
+{
+	struct uart_port *port = &pp->port;
+	struct circ_buf *xmit = &port->state->xmit;
+	unsigned int pending, count;
+
+	if (port->x_char) {
+		/* Send special char - probably flow control */
+		writel(port->x_char, port->membase + ALTERA_JTAGUART_DATA_REG);
+		port->x_char = 0;
+		port->icount.tx++;
+		return;
+	}
+
+	pending = uart_circ_chars_pending(xmit);
+	if (pending > 0) {
+		count = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
+				ALTERA_JTAGUART_CONTROL_WSPACE_MSK) >>
+			ALTERA_JTAGUART_CONTROL_WSPACE_OFF;
+		if (count > pending)
+			count = pending;
+		if (count > 0) {
+			pending -= count;
+			while (count--) {
+				writel(xmit->buf[xmit->tail],
+				       port->membase + ALTERA_JTAGUART_DATA_REG);
+				xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+				port->icount.tx++;
+			}
+			if (pending < WAKEUP_CHARS)
+				uart_write_wakeup(port);
+		}
+	}
+
+	if (pending == 0) {
+		pp->imr &= ~ALTERA_JTAGUART_CONTROL_WE_MSK;
+		writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+	}
+}
+
+static irqreturn_t altera_jtaguart_interrupt(int irq, void *data)
+{
+	struct uart_port *port = data;
+	struct altera_jtaguart *pp =
+	    container_of(port, struct altera_jtaguart, port);
+	unsigned int isr;
+
+	isr = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) >>
+	       ALTERA_JTAGUART_CONTROL_RI_OFF) & pp->imr;
+
+	spin_lock(&port->lock);
+
+	if (isr & ALTERA_JTAGUART_CONTROL_RE_MSK)
+		altera_jtaguart_rx_chars(pp);
+	if (isr & ALTERA_JTAGUART_CONTROL_WE_MSK)
+		altera_jtaguart_tx_chars(pp);
+
+	spin_unlock(&port->lock);
+
+	return IRQ_RETVAL(isr);
+}
+
+static void altera_jtaguart_config_port(struct uart_port *port, int flags)
+{
+	port->type = PORT_ALTERA_JTAGUART;
+
+	/* Clear mask, so no surprise interrupts. */
+	writel(0, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static int altera_jtaguart_startup(struct uart_port *port)
+{
+	struct altera_jtaguart *pp =
+	    container_of(port, struct altera_jtaguart, port);
+	unsigned long flags;
+	int ret;
+
+	ret = request_irq(port->irq, altera_jtaguart_interrupt, IRQF_DISABLED,
+			DRV_NAME, port);
+	if (ret) {
+		pr_err(DRV_NAME ": unable to attach Altera JTAG UART %d "
+		       "interrupt vector=%d\n", port->line, port->irq);
+		return ret;
+	}
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* Enable RX interrupts now */
+	pp->imr = ALTERA_JTAGUART_CONTROL_RE_MSK;
+	writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return 0;
+}
+
+static void altera_jtaguart_shutdown(struct uart_port *port)
+{
+	struct altera_jtaguart *pp =
+	    container_of(port, struct altera_jtaguart, port);
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* Disable all interrupts now */
+	pp->imr = 0;
+	writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	free_irq(port->irq, port);
+}
+
+static const char *altera_jtaguart_type(struct uart_port *port)
+{
+	return (port->type == PORT_ALTERA_JTAGUART) ? "Altera JTAG UART" : NULL;
+}
+
+static int altera_jtaguart_request_port(struct uart_port *port)
+{
+	/* UARTs always present */
+	return 0;
+}
+
+static void altera_jtaguart_release_port(struct uart_port *port)
+{
+	/* Nothing to release... */
+}
+
+static int altera_jtaguart_verify_port(struct uart_port *port,
+				       struct serial_struct *ser)
+{
+	if (ser->type != PORT_UNKNOWN && ser->type != PORT_ALTERA_JTAGUART)
+		return -EINVAL;
+	return 0;
+}
+
+/*
+ *	Define the basic serial functions we support.
+ */
+static struct uart_ops altera_jtaguart_ops = {
+	.tx_empty	= altera_jtaguart_tx_empty,
+	.get_mctrl	= altera_jtaguart_get_mctrl,
+	.set_mctrl	= altera_jtaguart_set_mctrl,
+	.start_tx	= altera_jtaguart_start_tx,
+	.stop_tx	= altera_jtaguart_stop_tx,
+	.stop_rx	= altera_jtaguart_stop_rx,
+	.enable_ms	= altera_jtaguart_enable_ms,
+	.break_ctl	= altera_jtaguart_break_ctl,
+	.startup	= altera_jtaguart_startup,
+	.shutdown	= altera_jtaguart_shutdown,
+	.set_termios	= altera_jtaguart_set_termios,
+	.type		= altera_jtaguart_type,
+	.request_port	= altera_jtaguart_request_port,
+	.release_port	= altera_jtaguart_release_port,
+	.config_port	= altera_jtaguart_config_port,
+	.verify_port	= altera_jtaguart_verify_port,
+};
+
+#define ALTERA_JTAGUART_MAXPORTS 1
+static struct altera_jtaguart altera_jtaguart_ports[ALTERA_JTAGUART_MAXPORTS];
+
+#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE)
+
+int __init early_altera_jtaguart_setup(struct altera_jtaguart_platform_uart
+				       *platp)
+{
+	struct uart_port *port;
+	int i;
+
+	for (i = 0; i < ALTERA_JTAGUART_MAXPORTS && platp[i].mapbase; i++) {
+		port = &altera_jtaguart_ports[i].port;
+
+		port->line = i;
+		port->type = PORT_ALTERA_JTAGUART;
+		port->mapbase = platp[i].mapbase;
+		port->membase = ioremap(port->mapbase, ALTERA_JTAGUART_SIZE);
+		port->iotype = SERIAL_IO_MEM;
+		port->irq = platp[i].irq;
+		port->flags = ASYNC_BOOT_AUTOCONF;
+		port->ops = &altera_jtaguart_ops;
+	}
+
+	return 0;
+}
+
+#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS)
+static void altera_jtaguart_console_putc(struct console *co, const char c)
+{
+	struct uart_port *port = &(altera_jtaguart_ports + co->index)->port;
+	unsigned long status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+	while (((status = readl(port->membase + ALTERA_JTAGUART_CONTROL_REG)) &
+		ALTERA_JTAGUART_CONTROL_WSPACE_MSK) == 0) {
+		if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0) {
+			spin_unlock_irqrestore(&port->lock, flags);
+			return;	/* no connection activity */
+		}
+		spin_unlock_irqrestore(&port->lock, flags);
+		cpu_relax();
+		spin_lock_irqsave(&port->lock, flags);
+	}
+	writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+#else
+static void altera_jtaguart_console_putc(struct console *co, const char c)
+{
+	struct uart_port *port = &(altera_jtaguart_ports + co->index)->port;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+	while ((readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
+		ALTERA_JTAGUART_CONTROL_WSPACE_MSK) == 0) {
+		spin_unlock_irqrestore(&port->lock, flags);
+		cpu_relax();
+		spin_lock_irqsave(&port->lock, flags);
+	}
+	writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+#endif
+
+static void altera_jtaguart_console_write(struct console *co, const char *s,
+					  unsigned int count)
+{
+	for (; count; count--, s++) {
+		altera_jtaguart_console_putc(co, *s);
+		if (*s == '\n')
+			altera_jtaguart_console_putc(co, '\r');
+	}
+}
+
+static int __init altera_jtaguart_console_setup(struct console *co,
+						char *options)
+{
+	struct uart_port *port;
+
+	if (co->index < 0 || co->index >= ALTERA_JTAGUART_MAXPORTS)
+		return -EINVAL;
+	port = &altera_jtaguart_ports[co->index].port;
+	if (port->membase == 0)
+		return -ENODEV;
+	return 0;
+}
+
+static struct uart_driver altera_jtaguart_driver;
+
+static struct console altera_jtaguart_console = {
+	.name	= "ttyJ",
+	.write	= altera_jtaguart_console_write,
+	.device	= uart_console_device,
+	.setup	= altera_jtaguart_console_setup,
+	.flags	= CON_PRINTBUFFER,
+	.index	= -1,
+	.data	= &altera_jtaguart_driver,
+};
+
+static int __init altera_jtaguart_console_init(void)
+{
+	register_console(&altera_jtaguart_console);
+	return 0;
+}
+
+console_initcall(altera_jtaguart_console_init);
+
+#define	ALTERA_JTAGUART_CONSOLE	(&altera_jtaguart_console)
+
+#else
+
+#define	ALTERA_JTAGUART_CONSOLE	NULL
+
+#endif /* CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE */
+
+static struct uart_driver altera_jtaguart_driver = {
+	.owner		= THIS_MODULE,
+	.driver_name	= "altera_jtaguart",
+	.dev_name	= "ttyJ",
+	.major		= ALTERA_JTAGUART_MAJOR,
+	.minor		= ALTERA_JTAGUART_MINOR,
+	.nr		= ALTERA_JTAGUART_MAXPORTS,
+	.cons		= ALTERA_JTAGUART_CONSOLE,
+};
+
+static int __devinit altera_jtaguart_probe(struct platform_device *pdev)
+{
+	struct altera_jtaguart_platform_uart *platp = pdev->dev.platform_data;
+	struct uart_port *port;
+	int i;
+
+	for (i = 0; i < ALTERA_JTAGUART_MAXPORTS && platp[i].mapbase; i++) {
+		port = &altera_jtaguart_ports[i].port;
+
+		port->line = i;
+		port->type = PORT_ALTERA_JTAGUART;
+		port->mapbase = platp[i].mapbase;
+		port->membase = ioremap(port->mapbase, ALTERA_JTAGUART_SIZE);
+		port->iotype = SERIAL_IO_MEM;
+		port->irq = platp[i].irq;
+		port->ops = &altera_jtaguart_ops;
+		port->flags = ASYNC_BOOT_AUTOCONF;
+
+		uart_add_one_port(&altera_jtaguart_driver, port);
+	}
+
+	return 0;
+}
+
+static int __devexit altera_jtaguart_remove(struct platform_device *pdev)
+{
+	struct uart_port *port;
+	int i;
+
+	for (i = 0; i < ALTERA_JTAGUART_MAXPORTS; i++) {
+		port = &altera_jtaguart_ports[i].port;
+		if (port)
+			uart_remove_one_port(&altera_jtaguart_driver, port);
+	}
+
+	return 0;
+}
+
+static struct platform_driver altera_jtaguart_platform_driver = {
+	.probe	= altera_jtaguart_probe,
+	.remove	= __devexit_p(altera_jtaguart_remove),
+	.driver	= {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init altera_jtaguart_init(void)
+{
+	int rc;
+
+	rc = uart_register_driver(&altera_jtaguart_driver);
+	if (rc)
+		return rc;
+	rc = platform_driver_register(&altera_jtaguart_platform_driver);
+	if (rc) {
+		uart_unregister_driver(&altera_jtaguart_driver);
+		return rc;
+	}
+	return 0;
+}
+
+static void __exit altera_jtaguart_exit(void)
+{
+	platform_driver_unregister(&altera_jtaguart_platform_driver);
+	uart_unregister_driver(&altera_jtaguart_driver);
+}
+
+module_init(altera_jtaguart_init);
+module_exit(altera_jtaguart_exit);
+
+MODULE_DESCRIPTION("Altera JTAG UART driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/serial/altera_uart.c b/drivers/serial/altera_uart.c
new file mode 100644
index 0000000..bcee156
--- /dev/null
+++ b/drivers/serial/altera_uart.c
@@ -0,0 +1,570 @@
+/*
+ * altera_uart.c -- Altera UART driver
+ *
+ * Based on mcf.c -- Freescale ColdFire UART driver
+ *
+ * (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
+ * (C) Copyright 2008, Thomas Chou <thomas@wytron.com.tw>
+ * (C) Copyright 2010, Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/altera_uart.h>
+
+#define DRV_NAME "altera_uart"
+
+/*
+ * Altera UART register definitions according to the Nios UART datasheet:
+ * http://www.altera.com/literature/ds/ds_nios_uart.pdf
+ */
+
+#define ALTERA_UART_SIZE		32
+
+#define ALTERA_UART_RXDATA_REG		0
+#define ALTERA_UART_TXDATA_REG		4
+#define ALTERA_UART_STATUS_REG		8
+#define ALTERA_UART_CONTROL_REG		12
+#define ALTERA_UART_DIVISOR_REG		16
+#define ALTERA_UART_EOP_REG		20
+
+#define ALTERA_UART_STATUS_PE_MSK	0x0001	/* parity error */
+#define ALTERA_UART_STATUS_FE_MSK	0x0002	/* framing error */
+#define ALTERA_UART_STATUS_BRK_MSK	0x0004	/* break */
+#define ALTERA_UART_STATUS_ROE_MSK	0x0008	/* RX overrun error */
+#define ALTERA_UART_STATUS_TOE_MSK	0x0010	/* TX overrun error */
+#define ALTERA_UART_STATUS_TMT_MSK	0x0020	/* TX shift register state */
+#define ALTERA_UART_STATUS_TRDY_MSK	0x0040	/* TX ready */
+#define ALTERA_UART_STATUS_RRDY_MSK	0x0080	/* RX ready */
+#define ALTERA_UART_STATUS_E_MSK	0x0100	/* exception condition */
+#define ALTERA_UART_STATUS_DCTS_MSK	0x0400	/* CTS logic-level change */
+#define ALTERA_UART_STATUS_CTS_MSK	0x0800	/* CTS logic state */
+#define ALTERA_UART_STATUS_EOP_MSK	0x1000	/* EOP written/read */
+
+						/* Enable interrupt on... */
+#define ALTERA_UART_CONTROL_PE_MSK	0x0001	/* ...parity error */
+#define ALTERA_UART_CONTROL_FE_MSK	0x0002	/* ...framing error */
+#define ALTERA_UART_CONTROL_BRK_MSK	0x0004	/* ...break */
+#define ALTERA_UART_CONTROL_ROE_MSK	0x0008	/* ...RX overrun */
+#define ALTERA_UART_CONTROL_TOE_MSK	0x0010	/* ...TX overrun */
+#define ALTERA_UART_CONTROL_TMT_MSK	0x0020	/* ...TX shift register empty */
+#define ALTERA_UART_CONTROL_TRDY_MSK	0x0040	/* ...TX ready */
+#define ALTERA_UART_CONTROL_RRDY_MSK	0x0080	/* ...RX ready */
+#define ALTERA_UART_CONTROL_E_MSK	0x0100	/* ...exception */
+
+#define ALTERA_UART_CONTROL_TRBK_MSK	0x0200	/* TX break */
+#define ALTERA_UART_CONTROL_DCTS_MSK	0x0400	/* Interrupt on CTS change */
+#define ALTERA_UART_CONTROL_RTS_MSK	0x0800	/* RTS signal */
+#define ALTERA_UART_CONTROL_EOP_MSK	0x1000	/* Interrupt on EOP */
+
+/*
+ * Local per-uart structure.
+ */
+struct altera_uart {
+	struct uart_port port;
+	unsigned int sigs;	/* Local copy of line sigs */
+	unsigned short imr;	/* Local IMR mirror */
+};
+
+static unsigned int altera_uart_tx_empty(struct uart_port *port)
+{
+	return (readl(port->membase + ALTERA_UART_STATUS_REG) &
+		ALTERA_UART_STATUS_TMT_MSK) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int altera_uart_get_mctrl(struct uart_port *port)
+{
+	struct altera_uart *pp = container_of(port, struct altera_uart, port);
+	unsigned long flags;
+	unsigned int sigs;
+
+	spin_lock_irqsave(&port->lock, flags);
+	sigs =
+	    (readl(port->membase + ALTERA_UART_STATUS_REG) &
+	     ALTERA_UART_STATUS_CTS_MSK) ? TIOCM_CTS : 0;
+	sigs |= (pp->sigs & TIOCM_RTS);
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return sigs;
+}
+
+static void altera_uart_set_mctrl(struct uart_port *port, unsigned int sigs)
+{
+	struct altera_uart *pp = container_of(port, struct altera_uart, port);
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+	pp->sigs = sigs;
+	if (sigs & TIOCM_RTS)
+		pp->imr |= ALTERA_UART_CONTROL_RTS_MSK;
+	else
+		pp->imr &= ~ALTERA_UART_CONTROL_RTS_MSK;
+	writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_start_tx(struct uart_port *port)
+{
+	struct altera_uart *pp = container_of(port, struct altera_uart, port);
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+	pp->imr |= ALTERA_UART_CONTROL_TRDY_MSK;
+	writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_stop_tx(struct uart_port *port)
+{
+	struct altera_uart *pp = container_of(port, struct altera_uart, port);
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+	pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK;
+	writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_stop_rx(struct uart_port *port)
+{
+	struct altera_uart *pp = container_of(port, struct altera_uart, port);
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+	pp->imr &= ~ALTERA_UART_CONTROL_RRDY_MSK;
+	writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_break_ctl(struct uart_port *port, int break_state)
+{
+	struct altera_uart *pp = container_of(port, struct altera_uart, port);
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+	if (break_state == -1)
+		pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK;
+	else
+		pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK;
+	writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_enable_ms(struct uart_port *port)
+{
+}
+
+static void altera_uart_set_termios(struct uart_port *port,
+				    struct ktermios *termios,
+				    struct ktermios *old)
+{
+	unsigned long flags;
+	unsigned int baud, baudclk;
+
+	baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
+	baudclk = port->uartclk / baud;
+
+	if (old)
+		tty_termios_copy_hw(termios, old);
+	tty_termios_encode_baud_rate(termios, baud, baud);
+
+	spin_lock_irqsave(&port->lock, flags);
+	writel(baudclk, port->membase + ALTERA_UART_DIVISOR_REG);
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_rx_chars(struct altera_uart *pp)
+{
+	struct uart_port *port = &pp->port;
+	unsigned char ch, flag;
+	unsigned short status;
+
+	while ((status = readl(port->membase + ALTERA_UART_STATUS_REG)) &
+	       ALTERA_UART_STATUS_RRDY_MSK) {
+		ch = readl(port->membase + ALTERA_UART_RXDATA_REG);
+		flag = TTY_NORMAL;
+		port->icount.rx++;
+
+		if (status & ALTERA_UART_STATUS_E_MSK) {
+			writel(status, port->membase + ALTERA_UART_STATUS_REG);
+
+			if (status & ALTERA_UART_STATUS_BRK_MSK) {
+				port->icount.brk++;
+				if (uart_handle_break(port))
+					continue;
+			} else if (status & ALTERA_UART_STATUS_PE_MSK) {
+				port->icount.parity++;
+			} else if (status & ALTERA_UART_STATUS_ROE_MSK) {
+				port->icount.overrun++;
+			} else if (status & ALTERA_UART_STATUS_FE_MSK) {
+				port->icount.frame++;
+			}
+
+			status &= port->read_status_mask;
+
+			if (status & ALTERA_UART_STATUS_BRK_MSK)
+				flag = TTY_BREAK;
+			else if (status & ALTERA_UART_STATUS_PE_MSK)
+				flag = TTY_PARITY;
+			else if (status & ALTERA_UART_STATUS_FE_MSK)
+				flag = TTY_FRAME;
+		}
+
+		if (uart_handle_sysrq_char(port, ch))
+			continue;
+		uart_insert_char(port, status, ALTERA_UART_STATUS_ROE_MSK, ch,
+				 flag);
+	}
+
+	tty_flip_buffer_push(port->state->port.tty);
+}
+
+static void altera_uart_tx_chars(struct altera_uart *pp)
+{
+	struct uart_port *port = &pp->port;
+	struct circ_buf *xmit = &port->state->xmit;
+
+	if (port->x_char) {
+		/* Send special char - probably flow control */
+		writel(port->x_char, port->membase + ALTERA_UART_TXDATA_REG);
+		port->x_char = 0;
+		port->icount.tx++;
+		return;
+	}
+
+	while (readl(port->membase + ALTERA_UART_STATUS_REG) &
+	       ALTERA_UART_STATUS_TRDY_MSK) {
+		if (xmit->head == xmit->tail)
+			break;
+		writel(xmit->buf[xmit->tail],
+		       port->membase + ALTERA_UART_TXDATA_REG);
+		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+		port->icount.tx++;
+	}
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(port);
+
+	if (xmit->head == xmit->tail) {
+		pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK;
+		writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+	}
+}
+
+static irqreturn_t altera_uart_interrupt(int irq, void *data)
+{
+	struct uart_port *port = data;
+	struct altera_uart *pp = container_of(port, struct altera_uart, port);
+	unsigned int isr;
+
+	isr = readl(port->membase + ALTERA_UART_STATUS_REG) & pp->imr;
+	if (isr & ALTERA_UART_STATUS_RRDY_MSK)
+		altera_uart_rx_chars(pp);
+	if (isr & ALTERA_UART_STATUS_TRDY_MSK)
+		altera_uart_tx_chars(pp);
+	return IRQ_RETVAL(isr);
+}
+
+static void altera_uart_config_port(struct uart_port *port, int flags)
+{
+	port->type = PORT_ALTERA_UART;
+
+	/* Clear mask, so no surprise interrupts. */
+	writel(0, port->membase + ALTERA_UART_CONTROL_REG);
+	/* Clear status register */
+	writel(0, port->membase + ALTERA_UART_STATUS_REG);
+}
+
+static int altera_uart_startup(struct uart_port *port)
+{
+	struct altera_uart *pp = container_of(port, struct altera_uart, port);
+	unsigned long flags;
+	int ret;
+
+	ret = request_irq(port->irq, altera_uart_interrupt, IRQF_DISABLED,
+			DRV_NAME, port);
+	if (ret) {
+		pr_err(DRV_NAME ": unable to attach Altera UART %d "
+		       "interrupt vector=%d\n", port->line, port->irq);
+		return ret;
+	}
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* Enable RX interrupts now */
+	pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
+	writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return 0;
+}
+
+static void altera_uart_shutdown(struct uart_port *port)
+{
+	struct altera_uart *pp = container_of(port, struct altera_uart, port);
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* Disable all interrupts now */
+	pp->imr = 0;
+	writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	free_irq(port->irq, port);
+}
+
+static const char *altera_uart_type(struct uart_port *port)
+{
+	return (port->type == PORT_ALTERA_UART) ? "Altera UART" : NULL;
+}
+
+static int altera_uart_request_port(struct uart_port *port)
+{
+	/* UARTs always present */
+	return 0;
+}
+
+static void altera_uart_release_port(struct uart_port *port)
+{
+	/* Nothing to release... */
+}
+
+static int altera_uart_verify_port(struct uart_port *port,
+				   struct serial_struct *ser)
+{
+	if ((ser->type != PORT_UNKNOWN) && (ser->type != PORT_ALTERA_UART))
+		return -EINVAL;
+	return 0;
+}
+
+/*
+ *	Define the basic serial functions we support.
+ */
+static struct uart_ops altera_uart_ops = {
+	.tx_empty	= altera_uart_tx_empty,
+	.get_mctrl	= altera_uart_get_mctrl,
+	.set_mctrl	= altera_uart_set_mctrl,
+	.start_tx	= altera_uart_start_tx,
+	.stop_tx	= altera_uart_stop_tx,
+	.stop_rx	= altera_uart_stop_rx,
+	.enable_ms	= altera_uart_enable_ms,
+	.break_ctl	= altera_uart_break_ctl,
+	.startup	= altera_uart_startup,
+	.shutdown	= altera_uart_shutdown,
+	.set_termios	= altera_uart_set_termios,
+	.type		= altera_uart_type,
+	.request_port	= altera_uart_request_port,
+	.release_port	= altera_uart_release_port,
+	.config_port	= altera_uart_config_port,
+	.verify_port	= altera_uart_verify_port,
+};
+
+static struct altera_uart altera_uart_ports[CONFIG_SERIAL_ALTERA_UART_MAXPORTS];
+
+#if defined(CONFIG_SERIAL_ALTERA_UART_CONSOLE)
+
+int __init early_altera_uart_setup(struct altera_uart_platform_uart *platp)
+{
+	struct uart_port *port;
+	int i;
+
+	for (i = 0; i < CONFIG_SERIAL_ALTERA_UART_MAXPORTS && platp[i].mapbase; i++) {
+		port = &altera_uart_ports[i].port;
+
+		port->line = i;
+		port->type = PORT_ALTERA_UART;
+		port->mapbase = platp[i].mapbase;
+		port->membase = ioremap(port->mapbase, ALTERA_UART_SIZE);
+		port->iotype = SERIAL_IO_MEM;
+		port->irq = platp[i].irq;
+		port->uartclk = platp[i].uartclk;
+		port->flags = ASYNC_BOOT_AUTOCONF;
+		port->ops = &altera_uart_ops;
+	}
+
+	return 0;
+}
+
+static void altera_uart_console_putc(struct console *co, const char c)
+{
+	struct uart_port *port = &(altera_uart_ports + co->index)->port;
+	int i;
+
+	for (i = 0; i < 0x10000; i++) {
+		if (readl(port->membase + ALTERA_UART_STATUS_REG) &
+		    ALTERA_UART_STATUS_TRDY_MSK)
+			break;
+	}
+	writel(c, port->membase + ALTERA_UART_TXDATA_REG);
+	for (i = 0; i < 0x10000; i++) {
+		if (readl(port->membase + ALTERA_UART_STATUS_REG) &
+		    ALTERA_UART_STATUS_TRDY_MSK)
+			break;
+	}
+}
+
+static void altera_uart_console_write(struct console *co, const char *s,
+				      unsigned int count)
+{
+	for (; count; count--, s++) {
+		altera_uart_console_putc(co, *s);
+		if (*s == '\n')
+			altera_uart_console_putc(co, '\r');
+	}
+}
+
+static int __init altera_uart_console_setup(struct console *co, char *options)
+{
+	struct uart_port *port;
+	int baud = CONFIG_SERIAL_ALTERA_UART_BAUDRATE;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+
+	if (co->index < 0 || co->index >= CONFIG_SERIAL_ALTERA_UART_MAXPORTS)
+		return -EINVAL;
+	port = &altera_uart_ports[co->index].port;
+	if (port->membase == 0)
+		return -ENODEV;
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+	return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver altera_uart_driver;
+
+static struct console altera_uart_console = {
+	.name	= "ttyS",
+	.write	= altera_uart_console_write,
+	.device	= uart_console_device,
+	.setup	= altera_uart_console_setup,
+	.flags	= CON_PRINTBUFFER,
+	.index	= -1,
+	.data	= &altera_uart_driver,
+};
+
+static int __init altera_uart_console_init(void)
+{
+	register_console(&altera_uart_console);
+	return 0;
+}
+
+console_initcall(altera_uart_console_init);
+
+#define	ALTERA_UART_CONSOLE	(&altera_uart_console)
+
+#else
+
+#define	ALTERA_UART_CONSOLE	NULL
+
+#endif /* CONFIG_SERIAL_ALTERA_UART_CONSOLE */
+
+/*
+ *	Define the altera_uart UART driver structure.
+ */
+static struct uart_driver altera_uart_driver = {
+	.owner		= THIS_MODULE,
+	.driver_name	= DRV_NAME,
+	.dev_name	= "ttyS",
+	.major		= TTY_MAJOR,
+	.minor		= 64,
+	.nr		= CONFIG_SERIAL_ALTERA_UART_MAXPORTS,
+	.cons		= ALTERA_UART_CONSOLE,
+};
+
+static int __devinit altera_uart_probe(struct platform_device *pdev)
+{
+	struct altera_uart_platform_uart *platp = pdev->dev.platform_data;
+	struct uart_port *port;
+	int i;
+
+	for (i = 0; i < CONFIG_SERIAL_ALTERA_UART_MAXPORTS && platp[i].mapbase; i++) {
+		port = &altera_uart_ports[i].port;
+
+		port->line = i;
+		port->type = PORT_ALTERA_UART;
+		port->mapbase = platp[i].mapbase;
+		port->membase = ioremap(port->mapbase, ALTERA_UART_SIZE);
+		port->iotype = SERIAL_IO_MEM;
+		port->irq = platp[i].irq;
+		port->uartclk = platp[i].uartclk;
+		port->ops = &altera_uart_ops;
+		port->flags = ASYNC_BOOT_AUTOCONF;
+
+		uart_add_one_port(&altera_uart_driver, port);
+	}
+
+	return 0;
+}
+
+static int altera_uart_remove(struct platform_device *pdev)
+{
+	struct uart_port *port;
+	int i;
+
+	for (i = 0; i < CONFIG_SERIAL_ALTERA_UART_MAXPORTS; i++) {
+		port = &altera_uart_ports[i].port;
+		if (port)
+			uart_remove_one_port(&altera_uart_driver, port);
+	}
+
+	return 0;
+}
+
+static struct platform_driver altera_uart_platform_driver = {
+	.probe	= altera_uart_probe,
+	.remove	= __devexit_p(altera_uart_remove),
+	.driver	= {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+		.pm	= NULL,
+	},
+};
+
+static int __init altera_uart_init(void)
+{
+	int rc;
+
+	rc = uart_register_driver(&altera_uart_driver);
+	if (rc)
+		return rc;
+	rc = platform_driver_register(&altera_uart_platform_driver);
+	if (rc) {
+		uart_unregister_driver(&altera_uart_driver);
+		return rc;
+	}
+	return 0;
+}
+
+static void __exit altera_uart_exit(void)
+{
+	platform_driver_unregister(&altera_uart_platform_driver);
+	uart_unregister_driver(&altera_uart_driver);
+}
+
+module_init(altera_uart_init);
+module_exit(altera_uart_exit);
+
+MODULE_DESCRIPTION("Altera UART driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 743ebf5..eb4cb48 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -342,9 +342,9 @@
 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
 	unsigned int status;
 
-	do {
-		status = readw(uap->port.membase + UART01x_FR);
-	} while (status & UART01x_FR_RXFE);
+	status = readw(uap->port.membase + UART01x_FR);
+	if (status & UART01x_FR_RXFE)
+		return NO_POLL_CHAR;
 
 	return readw(uap->port.membase + UART01x_DR);
 }
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index c88f8ad..e57fb3d 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -34,32 +34,12 @@
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
 
+#include <asm/bfin_sport.h>
 #include <asm/delay.h>
 #include <asm/portmux.h>
 
 #include "bfin_sport_uart.h"
 
-#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
-unsigned short bfin_uart_pin_req_sport0[] =
-	{P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, \
-	 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0};
-#endif
-#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
-unsigned short bfin_uart_pin_req_sport1[] =
-	{P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, \
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0};
-#endif
-#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
-unsigned short bfin_uart_pin_req_sport2[] =
-	{P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, \
-	P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0};
-#endif
-#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
-unsigned short bfin_uart_pin_req_sport3[] =
-	{P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, \
-	P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0};
-#endif
-
 struct sport_uart_port {
 	struct uart_port	port;
 	int			err_irq;
@@ -69,9 +49,13 @@
 	unsigned short		txmask2;
 	unsigned char		stopb;
 /*	unsigned char		parib; */
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+	int cts_pin;
+	int rts_pin;
+#endif
 };
 
-static void sport_uart_tx_chars(struct sport_uart_port *up);
+static int sport_uart_tx_chars(struct sport_uart_port *up);
 static void sport_stop_tx(struct uart_port *port);
 
 static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
@@ -219,6 +203,59 @@
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+static unsigned int sport_get_mctrl(struct uart_port *port)
+{
+	struct sport_uart_port *up = (struct sport_uart_port *)port;
+	if (up->cts_pin < 0)
+		return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+
+	/* The CTS pin is active-low. */
+	if (SPORT_UART_GET_CTS(up))
+		return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+	else
+		return TIOCM_DSR | TIOCM_CAR;
+}
+
+static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	struct sport_uart_port *up = (struct sport_uart_port *)port;
+	if (up->rts_pin < 0)
+		return;
+
+	/* The RTS pin is active-low. */
+	if (mctrl & TIOCM_RTS)
+		SPORT_UART_ENABLE_RTS(up);
+	else
+		SPORT_UART_DISABLE_RTS(up);
+}
+
+/*
+ * Handle any change of modem status signal.
+ */
+static irqreturn_t sport_mctrl_cts_int(int irq, void *dev_id)
+{
+	struct sport_uart_port *up = (struct sport_uart_port *)dev_id;
+	unsigned int status;
+
+	status = sport_get_mctrl(&up->port);
+	uart_handle_cts_change(&up->port, status & TIOCM_CTS);
+
+	return IRQ_HANDLED;
+}
+#else
+static unsigned int sport_get_mctrl(struct uart_port *port)
+{
+	pr_debug("%s enter\n", __func__);
+	return TIOCM_CTS | TIOCM_CD | TIOCM_DSR;
+}
+
+static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	pr_debug("%s enter\n", __func__);
+}
+#endif
+
 /* Request IRQ, setup clock */
 static int sport_startup(struct uart_port *port)
 {
@@ -247,6 +284,21 @@
 		goto fail2;
 	}
 
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+	if (up->cts_pin >= 0) {
+		if (request_irq(gpio_to_irq(up->cts_pin),
+			sport_mctrl_cts_int,
+			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+			IRQF_DISABLED, "BFIN_SPORT_UART_CTS", up)) {
+			up->cts_pin = -1;
+			dev_info(port->dev, "Unable to attach Blackfin UART "
+				 "over SPORT CTS interrupt, disabling it\n");
+		}
+	}
+	if (up->rts_pin >= 0)
+		gpio_direction_output(up->rts_pin, 0);
+#endif
+
 	return 0;
  fail2:
 	free_irq(up->port.irq+1, up);
@@ -256,23 +308,35 @@
 	return ret;
 }
 
-static void sport_uart_tx_chars(struct sport_uart_port *up)
+/*
+ * sport_uart_tx_chars
+ *
+ * Returns 1 if the caller needs to enable the SPORT transmitter,
+ * 0 if there is nothing to do.
+ */
+static int sport_uart_tx_chars(struct sport_uart_port *up)
 {
 	struct circ_buf *xmit = &up->port.state->xmit;
 
 	if (SPORT_GET_STAT(up) & TXF)
-		return;
+		return 0;
 
 	if (up->port.x_char) {
 		tx_one_byte(up, up->port.x_char);
 		up->port.icount.tx++;
 		up->port.x_char = 0;
-		return;
+		return 1;
 	}
 
 	if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
-		sport_stop_tx(&up->port);
-		return;
+		/* The waiting loop to stop SPORT TX from the TX interrupt
+		 * is too long and may block SPORT RX interrupts, causing an
+		 * RX FIFO overflow. So only stop SPORT TX after the last
+		 * char in the TX FIFO has moved into the shift register.
+		 */
+		if (SPORT_GET_STAT(up) & TXHRE)
+			sport_stop_tx(&up->port);
+		return 0;
 	}
 
 	while(!(SPORT_GET_STAT(up) & TXF) && !uart_circ_empty(xmit)) {
@@ -283,6 +347,8 @@
 
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(&up->port);
+
+	return 1;
 }
 
 static unsigned int sport_tx_empty(struct uart_port *port)
@@ -298,23 +364,15 @@
 		return 0;
 }
 
-static unsigned int sport_get_mctrl(struct uart_port *port)
-{
-	pr_debug("%s enter\n", __func__);
-	return (TIOCM_CTS | TIOCM_CD | TIOCM_DSR);
-}
-
-static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
-	pr_debug("%s enter\n", __func__);
-}
-
 static void sport_stop_tx(struct uart_port *port)
 {
 	struct sport_uart_port *up = (struct sport_uart_port *)port;
 
 	pr_debug("%s enter\n", __func__);
 
+	if (!(SPORT_GET_TCR1(up) & TSPEN))
+		return;
+
 	/* Although the hold register is empty, last byte is still in shift
 	 * register and not sent out yet. So, put a dummy data into TX FIFO.
 	 * Then, sport tx stops when last byte is shift out and the dummy
@@ -337,11 +395,12 @@
 	pr_debug("%s enter\n", __func__);
 
 	/* Write data into the SPORT FIFO before enabling SPORT to transmit */
-	sport_uart_tx_chars(up);
+	if (sport_uart_tx_chars(up)) {
+		/* Enable transmit, then an interrupt will be generated */
+		SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
+		SSYNC();
+	}
 
-	/* Enable transmit, then an interrupt will generated */
-	SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
-	SSYNC();
 	pr_debug("%s exit\n", __func__);
 }
 
@@ -379,6 +438,10 @@
 	free_irq(up->port.irq, up);
 	free_irq(up->port.irq+1, up);
 	free_irq(up->err_irq, up);
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+	if (up->cts_pin >= 0)
+		free_irq(gpio_to_irq(up->cts_pin), up);
+#endif
 }
 
 static const char *sport_type(struct uart_port *port)
@@ -448,27 +511,14 @@
 		/* up->parib = 1; */
 	}
 
-	port->read_status_mask = OE;
-	if (termios->c_iflag & INPCK)
-		port->read_status_mask |= (FE | PE);
-	if (termios->c_iflag & (BRKINT | PARMRK))
-		port->read_status_mask |= BI;
+	spin_lock_irqsave(&up->port.lock, flags);
+
+	port->read_status_mask = 0;
 
 	/*
 	 * Characters to ignore
 	 */
 	port->ignore_status_mask = 0;
-	if (termios->c_iflag & IGNPAR)
-		port->ignore_status_mask |= FE | PE;
-	if (termios->c_iflag & IGNBRK) {
-		port->ignore_status_mask |= BI;
-		/*
-		 * If we're ignoring parity and break indicators,
-		 * ignore overruns too (for real raw support).
-		 */
-		if (termios->c_iflag & IGNPAR)
-			port->ignore_status_mask |= OE;
-	}
 
 	/* RX extract mask */
 	up->rxmask = 0x01 | (((up->csize + up->stopb) * 2 - 1) << 0x8);
@@ -488,8 +538,6 @@
 	/* uart baud rate */
 	port->uartclk = uart_get_baud_rate(port, termios, old, 0, get_sclk()/16);
 
-	spin_lock_irqsave(&up->port.lock, flags);
-
 	/* Disable UART */
 	SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
 	SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
@@ -542,6 +590,8 @@
 static struct sport_uart_port *bfin_sport_uart_ports[BFIN_SPORT_UART_MAX_PORTS];
 
 #ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+#define CLASS_BFIN_SPORT_CONSOLE	"bfin-sport-console"
+
 static int __init
 sport_uart_console_setup(struct console *co, char *options)
 {
@@ -549,7 +599,11 @@
 	int baud = 57600;
 	int bits = 8;
 	int parity = 'n';
+# ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+	int flow = 'r';
+# else
 	int flow = 'n';
+# endif
 
 	/* Check whether an invalid uart number has been specified */
 	if (co->index < 0 || co->index >= BFIN_SPORT_UART_MAX_PORTS)
@@ -690,11 +744,11 @@
 
 	if (bfin_sport_uart_ports[pdev->id] == NULL) {
 		bfin_sport_uart_ports[pdev->id] =
-			kmalloc(sizeof(struct sport_uart_port), GFP_KERNEL);
+			kzalloc(sizeof(struct sport_uart_port), GFP_KERNEL);
 		sport = bfin_sport_uart_ports[pdev->id];
 		if (!sport) {
 			dev_err(&pdev->dev,
-				"Fail to kmalloc sport_uart_port\n");
+				"Failed to allocate sport_uart_port\n");
 			return -ENOMEM;
 		}
 
@@ -720,13 +774,13 @@
 			goto out_error_free_peripherals;
 		}
 
-		sport->port.membase = ioremap(res->start,
-			res->end - res->start);
+		sport->port.membase = ioremap(res->start, resource_size(res));
 		if (!sport->port.membase) {
 			dev_err(&pdev->dev, "Cannot map sport IO\n");
 			ret = -ENXIO;
 			goto out_error_free_peripherals;
 		}
+		sport->port.mapbase = res->start;
 
 		sport->port.irq = platform_get_irq(pdev, 0);
 		if (sport->port.irq < 0) {
@@ -741,6 +795,22 @@
 			ret = -ENOENT;
 			goto out_error_unmap;
 		}
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+		res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+		if (res == NULL)
+			sport->cts_pin = -1;
+		else
+			sport->cts_pin = res->start;
+
+		res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+		if (res == NULL)
+			sport->rts_pin = -1;
+		else
+			sport->rts_pin = res->start;
+
+		if (sport->rts_pin >= 0)
+			gpio_request(sport->rts_pin, DRV_NAME);
+#endif
 	}
 
 #ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
@@ -779,6 +849,10 @@
 
 	if (sport) {
 		uart_remove_one_port(&sport_uart_reg, &sport->port);
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+		if (sport->rts_pin >= 0)
+			gpio_free(sport->rts_pin);
+#endif
 		iounmap(sport->port.membase);
 		peripheral_free_list(
 			(unsigned short *)pdev->dev.platform_data);
@@ -802,7 +876,7 @@
 
 #ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
 static __initdata struct early_platform_driver early_sport_uart_driver = {
-	.class_str = DRV_NAME,
+	.class_str = CLASS_BFIN_SPORT_CONSOLE,
 	.pdrv = &sport_uart_driver,
 	.requested_id = EARLY_PLATFORM_ID_UNSET,
 };
@@ -811,7 +885,8 @@
 {
 	early_platform_driver_register(&early_sport_uart_driver, DRV_NAME);
 
-	early_platform_driver_probe(DRV_NAME, BFIN_SPORT_UART_MAX_PORTS, 0);
+	early_platform_driver_probe(CLASS_BFIN_SPORT_CONSOLE,
+		BFIN_SPORT_UART_MAX_PORTS, 0);
 
 	register_console(&sport_uart_console);
 
@@ -824,7 +899,7 @@
 {
 	int ret;
 
-	pr_info("Serial: Blackfin uart over sport driver\n");
+	pr_info("Blackfin uart over sport driver\n");
 
 	ret = uart_register_driver(&sport_uart_reg);
 	if (ret) {
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/serial/bfin_sport_uart.h
index abe0361..9ce253e 100644
--- a/drivers/serial/bfin_sport_uart.h
+++ b/drivers/serial/bfin_sport_uart.h
@@ -37,7 +37,21 @@
 #define SPORT_GET_TFSDIV(sport)		bfin_read16(((sport)->port.membase + OFFSET_TFSDIV))
 #define SPORT_GET_TX(sport)		bfin_read16(((sport)->port.membase + OFFSET_TX))
 #define SPORT_GET_RX(sport)		bfin_read16(((sport)->port.membase + OFFSET_RX))
-#define SPORT_GET_RX32(sport)		bfin_read32(((sport)->port.membase + OFFSET_RX))
+/*
+ * If another interrupt fires while doing a 32-bit read from RX FIFO,
+ * a fake RX underflow error will be generated.  So disable interrupts
+ * to prevent interruption while reading the FIFO.
+ */
+#define SPORT_GET_RX32(sport) \
+({ \
+	unsigned int __ret; \
+	if (ANOMALY_05000473) \
+		local_irq_disable(); \
+	__ret = bfin_read32((sport)->port.membase + OFFSET_RX); \
+	if (ANOMALY_05000473) \
+		local_irq_enable(); \
+	__ret; \
+})
 #define SPORT_GET_RCR1(sport)		bfin_read16(((sport)->port.membase + OFFSET_RCR1))
 #define SPORT_GET_RCR2(sport)		bfin_read16(((sport)->port.membase + OFFSET_RCR2))
 #define SPORT_GET_RCLKDIV(sport)	bfin_read16(((sport)->port.membase + OFFSET_RCLKDIV))
@@ -58,4 +72,15 @@
 
 #define SPORT_TX_FIFO_SIZE	8
 
+#define SPORT_UART_GET_CTS(x)		gpio_get_value(x->cts_pin)
+#define SPORT_UART_DISABLE_RTS(x)	gpio_set_value(x->rts_pin, 1)
+#define SPORT_UART_ENABLE_RTS(x)	gpio_set_value(x->rts_pin, 0)
+
+#if defined(CONFIG_SERIAL_BFIN_SPORT0_UART_CTSRTS) \
+	|| defined(CONFIG_SERIAL_BFIN_SPORT1_UART_CTSRTS) \
+	|| defined(CONFIG_SERIAL_BFIN_SPORT2_UART_CTSRTS) \
+	|| defined(CONFIG_SERIAL_BFIN_SPORT3_UART_CTSRTS)
+# define CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+#endif
+
 #endif /* _BFIN_SPORT_UART_H */
diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
index eadc1ab..a9a94ae 100644
--- a/drivers/serial/kgdboc.c
+++ b/drivers/serial/kgdboc.c
@@ -14,7 +14,9 @@
 #include <linux/kernel.h>
 #include <linux/ctype.h>
 #include <linux/kgdb.h>
+#include <linux/kdb.h>
 #include <linux/tty.h>
+#include <linux/console.h>
 
 #define MAX_CONFIG_LEN		40
 
@@ -32,6 +34,40 @@
 static struct tty_driver	*kgdb_tty_driver;
 static int			kgdb_tty_line;
 
+#ifdef CONFIG_KDB_KEYBOARD
+static int kgdboc_register_kbd(char **cptr)
+{
+	if (strncmp(*cptr, "kbd", 3) == 0) {
+		if (kdb_poll_idx < KDB_POLL_FUNC_MAX) {
+			kdb_poll_funcs[kdb_poll_idx] = kdb_get_kbd_char;
+			kdb_poll_idx++;
+			if (cptr[0][3] == ',')
+				*cptr += 4;
+			else
+				return 1;
+		}
+	}
+	return 0;
+}
+
+static void kgdboc_unregister_kbd(void)
+{
+	int i;
+
+	for (i = 0; i < kdb_poll_idx; i++) {
+		if (kdb_poll_funcs[i] == kdb_get_kbd_char) {
+			kdb_poll_idx--;
+			kdb_poll_funcs[i] = kdb_poll_funcs[kdb_poll_idx];
+			kdb_poll_funcs[kdb_poll_idx] = NULL;
+			i--;
+		}
+	}
+}
+#else /* ! CONFIG_KDB_KEYBOARD */
+#define kgdboc_register_kbd(x) 0
+#define kgdboc_unregister_kbd()
+#endif /* ! CONFIG_KDB_KEYBOARD */
+
 static int kgdboc_option_setup(char *opt)
 {
 	if (strlen(opt) > MAX_CONFIG_LEN) {
@@ -45,25 +81,51 @@
 
 __setup("kgdboc=", kgdboc_option_setup);
 
+static void cleanup_kgdboc(void)
+{
+	kgdboc_unregister_kbd();
+	if (configured == 1)
+		kgdb_unregister_io_module(&kgdboc_io_ops);
+}
+
 static int configure_kgdboc(void)
 {
 	struct tty_driver *p;
 	int tty_line = 0;
 	int err;
+	char *cptr = config;
+	struct console *cons;
 
 	err = kgdboc_option_setup(config);
 	if (err || !strlen(config) || isspace(config[0]))
 		goto noconfig;
 
 	err = -ENODEV;
+	kgdboc_io_ops.is_console = 0;
+	kgdb_tty_driver = NULL;
 
-	p = tty_find_polling_driver(config, &tty_line);
+	if (kgdboc_register_kbd(&cptr))
+		goto do_register;
+
+	p = tty_find_polling_driver(cptr, &tty_line);
 	if (!p)
 		goto noconfig;
 
+	cons = console_drivers;
+	while (cons) {
+		int idx;
+		if (cons->device && cons->device(cons, &idx) == p &&
+		    idx == tty_line) {
+			kgdboc_io_ops.is_console = 1;
+			break;
+		}
+		cons = cons->next;
+	}
+
 	kgdb_tty_driver = p;
 	kgdb_tty_line = tty_line;
 
+do_register:
 	err = kgdb_register_io_module(&kgdboc_io_ops);
 	if (err)
 		goto noconfig;
@@ -75,6 +137,7 @@
 noconfig:
 	config[0] = 0;
 	configured = 0;
+	cleanup_kgdboc();
 
 	return err;
 }
@@ -88,20 +151,18 @@
 	return configure_kgdboc();
 }
 
-static void cleanup_kgdboc(void)
-{
-	if (configured == 1)
-		kgdb_unregister_io_module(&kgdboc_io_ops);
-}
-
 static int kgdboc_get_char(void)
 {
+	if (!kgdb_tty_driver)
+		return -1;
 	return kgdb_tty_driver->ops->poll_get_char(kgdb_tty_driver,
 						kgdb_tty_line);
 }
 
 static void kgdboc_put_char(u8 chr)
 {
+	if (!kgdb_tty_driver)
+		return;
 	kgdb_tty_driver->ops->poll_put_char(kgdb_tty_driver,
 					kgdb_tty_line, chr);
 }
@@ -162,6 +223,25 @@
 	.post_exception		= kgdboc_post_exp_handler,
 };
 
+#ifdef CONFIG_KGDB_SERIAL_CONSOLE
+/* This is only available if kgdboc is built in, for early debugging */
+int __init kgdboc_early_init(char *opt)
+{
+	/* save the first character of the config string because the
+	 * init routine can destroy it.
+	 */
+	char save_ch;
+
+	kgdboc_option_setup(opt);
+	save_ch = config[0];
+	init_kgdboc();
+	config[0] = save_ch;
+	return 0;
+}
+
+early_param("ekgdboc", kgdboc_early_init);
+#endif /* CONFIG_KGDB_SERIAL_CONSOLE */
+
 module_init(init_kgdboc);
 module_exit(cleanup_kgdboc);
 module_param_call(kgdboc, param_set_kgdboc_var, param_get_string, &kps, 0644);
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index 55e113a..6a9c660 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -2071,6 +2071,7 @@
 
 		if (!(rc = mpsc_drv_map_regs(pi, dev))) {
 			mpsc_drv_get_platform_data(pi, dev, dev->id);
+			pi->port.dev = &dev->dev;
 
 			if (!(rc = mpsc_make_ready(pi))) {
 				spin_lock_init(&pi->tx_lock);
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 8d993c4..f250a61 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -151,7 +151,11 @@
 			handle_error(port);
 			continue;
 		}
-	} while (!(status & SCxSR_RDxF(port)));
+		break;
+	} while (1);
+
+	if (!(status & SCxSR_RDxF(port)))
+		return NO_POLL_CHAR;
 
 	c = sci_in(port, SCxRDR);
 
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 2c7a66a..978b3ce 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -102,6 +102,8 @@
 #endif
 };
 
+static void sunzilog_putchar(struct uart_port *port, int ch);
+
 #define ZILOG_CHANNEL_FROM_PORT(PORT)	((struct zilog_channel __iomem *)((PORT)->membase))
 #define UART_ZILOG(PORT)		((struct uart_sunzilog_port *)(PORT))
 
@@ -996,6 +998,50 @@
 	return -EINVAL;
 }
 
+#ifdef CONFIG_CONSOLE_POLL
+static int sunzilog_get_poll_char(struct uart_port *port)
+{
+	unsigned char ch, r1;
+	struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+	struct zilog_channel __iomem *channel
+		= ZILOG_CHANNEL_FROM_PORT(&up->port);
+
+
+	r1 = read_zsreg(channel, R1);
+	if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) {
+		writeb(ERR_RES, &channel->control);
+		ZSDELAY();
+		ZS_WSYNC(channel);
+	}
+
+	ch = readb(&channel->control);
+	ZSDELAY();
+
+	/* This funny hack depends upon BRK_ABRT not interfering
+	 * with the other bits we care about in R1.
+	 */
+	if (ch & BRK_ABRT)
+		r1 |= BRK_ABRT;
+
+	if (!(ch & Rx_CH_AV))
+		return NO_POLL_CHAR;
+
+	ch = readb(&channel->data);
+	ZSDELAY();
+
+	ch &= up->parity_mask;
+	return ch;
+}
+
+static void sunzilog_put_poll_char(struct uart_port *port,
+			unsigned char ch)
+{
+	struct uart_sunzilog_port *up = (struct uart_sunzilog_port *)port;
+
+	sunzilog_putchar(&up->port, ch);
+}
+#endif /* CONFIG_CONSOLE_POLL */
+
 static struct uart_ops sunzilog_pops = {
 	.tx_empty	=	sunzilog_tx_empty,
 	.set_mctrl	=	sunzilog_set_mctrl,
@@ -1013,6 +1059,10 @@
 	.request_port	=	sunzilog_request_port,
 	.config_port	=	sunzilog_config_port,
 	.verify_port	=	sunzilog_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char	=	sunzilog_get_poll_char,
+	.poll_put_char	=	sunzilog_put_poll_char,
+#endif
 };
 
 static int uart_chip_count;
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
index 786ba85..67ca642 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/serial/timbuart.c
@@ -68,12 +68,22 @@
 	tasklet_schedule(&uart->tasklet);
 }
 
+static unsigned int timbuart_tx_empty(struct uart_port *port)
+{
+	u32 isr = ioread32(port->membase + TIMBUART_ISR);
+
+	return (isr & TXBE) ? TIOCSER_TEMT : 0;
+}
+
 static void timbuart_flush_buffer(struct uart_port *port)
 {
-	u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | TIMBUART_CTRL_FLSHTX;
+	if (!timbuart_tx_empty(port)) {
+		u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
+			TIMBUART_CTRL_FLSHTX;
 
-	iowrite8(ctl, port->membase + TIMBUART_CTRL);
-	iowrite32(TXBF, port->membase + TIMBUART_ISR);
+		iowrite8(ctl, port->membase + TIMBUART_CTRL);
+		iowrite32(TXBF, port->membase + TIMBUART_ISR);
+	}
 }
 
 static void timbuart_rx_chars(struct uart_port *port)
@@ -195,13 +205,6 @@
 	dev_dbg(uart->port.dev, "%s leaving\n", __func__);
 }
 
-static unsigned int timbuart_tx_empty(struct uart_port *port)
-{
-	u32 isr = ioread32(port->membase + TIMBUART_ISR);
-
-	return (isr & TXBE) ? TIOCSER_TEMT : 0;
-}
-
 static unsigned int timbuart_get_mctrl(struct uart_port *port)
 {
 	u8 cts = ioread8(port->membase + TIMBUART_CTRL);
@@ -220,7 +223,7 @@
 	if (mctrl & TIOCM_RTS)
 		iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
 	else
-		iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
+		iowrite8(0, port->membase + TIMBUART_CTRL);
 }
 
 static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier)
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index f0a6c61..e6639a9 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -86,7 +86,7 @@
 	/* stats */
 	if (stat & ULITE_STATUS_RXVALID) {
 		port->icount.rx++;
-		ch = readb(port->membase + ULITE_RX);
+		ch = ioread32be(port->membase + ULITE_RX);
 
 		if (stat & ULITE_STATUS_PARITY)
 			port->icount.parity++;
@@ -131,7 +131,7 @@
 		return 0;
 
 	if (port->x_char) {
-		writeb(port->x_char, port->membase + ULITE_TX);
+		iowrite32be(port->x_char, port->membase + ULITE_TX);
 		port->x_char = 0;
 		port->icount.tx++;
 		return 1;
@@ -140,7 +140,7 @@
 	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
 		return 0;
 
-	writeb(xmit->buf[xmit->tail], port->membase + ULITE_TX);
+	iowrite32be(xmit->buf[xmit->tail], port->membase + ULITE_TX);
 	xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
 	port->icount.tx++;
 
@@ -157,7 +157,7 @@
 	int busy, n = 0;
 
 	do {
-		int stat = readb(port->membase + ULITE_STATUS);
+		int stat = ioread32be(port->membase + ULITE_STATUS);
 		busy  = ulite_receive(port, stat);
 		busy |= ulite_transmit(port, stat);
 		n++;
@@ -178,7 +178,7 @@
 	unsigned int ret;
 
 	spin_lock_irqsave(&port->lock, flags);
-	ret = readb(port->membase + ULITE_STATUS);
+	ret = ioread32be(port->membase + ULITE_STATUS);
 	spin_unlock_irqrestore(&port->lock, flags);
 
 	return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
@@ -201,7 +201,7 @@
 
 static void ulite_start_tx(struct uart_port *port)
 {
-	ulite_transmit(port, readb(port->membase + ULITE_STATUS));
+	ulite_transmit(port, ioread32be(port->membase + ULITE_STATUS));
 }
 
 static void ulite_stop_rx(struct uart_port *port)
@@ -230,17 +230,17 @@
 	if (ret)
 		return ret;
 
-	writeb(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
+	iowrite32be(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
 	       port->membase + ULITE_CONTROL);
-	writeb(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+	iowrite32be(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
 
 	return 0;
 }
 
 static void ulite_shutdown(struct uart_port *port)
 {
-	writeb(0, port->membase + ULITE_CONTROL);
-	readb(port->membase + ULITE_CONTROL); /* dummy */
+	iowrite32be(0, port->membase + ULITE_CONTROL);
+	ioread32be(port->membase + ULITE_CONTROL); /* dummy */
 	free_irq(port->irq, port);
 }
 
@@ -352,7 +352,7 @@
 
 	/* Spin waiting for TX fifo to have space available */
 	for (i = 0; i < 100000; i++) {
-		val = readb(port->membase + ULITE_STATUS);
+		val = ioread32be(port->membase + ULITE_STATUS);
 		if ((val & ULITE_STATUS_TXFULL) == 0)
 			break;
 		cpu_relax();
@@ -362,7 +362,7 @@
 static void ulite_console_putchar(struct uart_port *port, int ch)
 {
 	ulite_console_wait_tx(port);
-	writeb(ch, port->membase + ULITE_TX);
+	iowrite32be(ch, port->membase + ULITE_TX);
 }
 
 static void ulite_console_write(struct console *co, const char *s,
@@ -379,8 +379,8 @@
 		spin_lock_irqsave(&port->lock, flags);
 
 	/* save and disable interrupt */
-	ier = readb(port->membase + ULITE_STATUS) & ULITE_STATUS_IE;
-	writeb(0, port->membase + ULITE_CONTROL);
+	ier = ioread32be(port->membase + ULITE_STATUS) & ULITE_STATUS_IE;
+	iowrite32be(0, port->membase + ULITE_CONTROL);
 
 	uart_console_write(port, s, count, ulite_console_putchar);
 
@@ -388,7 +388,7 @@
 
 	/* restore interrupt state */
 	if (ier)
-		writeb(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+		iowrite32be(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
 
 	if (locked)
 		spin_unlock_irqrestore(&port->lock, flags);
@@ -601,7 +601,7 @@
 
 	id = of_get_property(op->node, "port-number", NULL);
 
-	return ulite_assign(&op->dev, id ? *id : -1, res.start+3, irq);
+	return ulite_assign(&op->dev, id ? *id : -1, res.start, irq);
 }
 
 static int __devexit ulite_of_remove(struct of_device *op)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a191fa2..f950b63 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -180,10 +180,10 @@
 	  This hooks up to the MicroWire controller on OMAP1 chips.
 
 config SPI_OMAP24XX
-	tristate "McSPI driver for OMAP24xx/OMAP34xx"
-	depends on ARCH_OMAP2 || ARCH_OMAP3
+	tristate "McSPI driver for OMAP"
+	depends on ARCH_OMAP2PLUS
 	help
-	  SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI
+	  SPI master controller for OMAP24XX and later Multichannel SPI
 	  (McSPI) modules.
 
 config SPI_OMAP_100K
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 239998b..a91db6c 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -48,6 +48,7 @@
 */
 
 #include "../comedidev.h"
+#include <linux/semaphore.h>
 
 #include <pcmcia/cs_types.h>
 #include <pcmcia/cs.h>
diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c
index d0a5090..c7e061e 100644
--- a/drivers/staging/udlfb/udlfb.c
+++ b/drivers/staging/udlfb/udlfb.c
@@ -1063,7 +1063,8 @@
 			atomic_read(&dev->lost_pixels) ? "yes" : "no");
 }
 
-static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *a,
+static ssize_t edid_show(struct file *filp, struct kobject *kobj,
+			 struct bin_attribute *a,
 			 char *buf, loff_t off, size_t count) {
 	struct device *fbdev = container_of(kobj, struct device, kobj);
 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 06863be..448f5b4 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -658,7 +658,8 @@
 /* Binary descriptors */
 
 static ssize_t
-read_descriptors(struct kobject *kobj, struct bin_attribute *attr,
+read_descriptors(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
 		char *buf, loff_t off, size_t count)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 397b678..5ae14f6 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -573,7 +573,7 @@
 			iface->condition == USB_INTERFACE_UNBOUND))
 		return -EINTR;
 
-	while (usb_trylock_device(udev) != 0) {
+	while (!usb_trylock_device(udev)) {
 
 		/* If we can't acquire the lock after waiting one second,
 		 * we're probably deadlocked */
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 6e98a36..94ecdbc 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -19,6 +19,9 @@
 #include <linux/usb/ch9.h>
 #include <linux/usb/ehci_def.h>
 #include <linux/delay.h>
+#include <linux/serial_core.h>
+#include <linux/kgdb.h>
+#include <linux/kthread.h>
 #include <asm/io.h>
 #include <asm/pci-direct.h>
 #include <asm/fixmap.h>
@@ -55,6 +58,7 @@
 static struct ehci_dbg_port __iomem *ehci_debug;
 static int dbgp_not_safe; /* Cannot use debug device during ehci reset */
 static unsigned int dbgp_endpoint_out;
+static unsigned int dbgp_endpoint_in;
 
 struct ehci_dev {
 	u32 bus;
@@ -91,6 +95,13 @@
 	return (x & ~0x0f) | (len & 0x0f);
 }
 
+#ifdef CONFIG_KGDB
+static struct kgdb_io kgdbdbgp_io_ops;
+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
+#else
+#define dbgp_kgdb_mode (0)
+#endif
+
 /*
  * USB Packet IDs (PIDs)
  */
@@ -182,11 +193,10 @@
 	/* Sleep to give the debug port a chance to breathe */
 }
 
-static int dbgp_wait_until_done(unsigned ctrl)
+static int dbgp_wait_until_done(unsigned ctrl, int loop)
 {
 	u32 pids, lpid;
 	int ret;
-	int loop = DBGP_LOOPS;
 
 retry:
 	writel(ctrl | DBGP_GO, &ehci_debug->control);
@@ -276,13 +286,13 @@
 	dbgp_set_data(bytes, size);
 	writel(addr, &ehci_debug->address);
 	writel(pids, &ehci_debug->pids);
-	ret = dbgp_wait_until_done(ctrl);
+	ret = dbgp_wait_until_done(ctrl, DBGP_LOOPS);
 
 	return ret;
 }
 
 static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
-				 int size)
+			  int size, int loops)
 {
 	u32 pids, addr, ctrl;
 	int ret;
@@ -302,7 +312,7 @@
 
 	writel(addr, &ehci_debug->address);
 	writel(pids, &ehci_debug->pids);
-	ret = dbgp_wait_until_done(ctrl);
+	ret = dbgp_wait_until_done(ctrl, loops);
 	if (ret < 0)
 		return ret;
 
@@ -343,12 +353,12 @@
 	dbgp_set_data(&req, sizeof(req));
 	writel(addr, &ehci_debug->address);
 	writel(pids, &ehci_debug->pids);
-	ret = dbgp_wait_until_done(ctrl);
+	ret = dbgp_wait_until_done(ctrl, DBGP_LOOPS);
 	if (ret < 0)
 		return ret;
 
 	/* Read the result */
-	return dbgp_bulk_read(devnum, 0, data, size);
+	return dbgp_bulk_read(devnum, 0, data, size, DBGP_LOOPS);
 }
 
 /* Find a PCI capability */
@@ -559,6 +569,7 @@
 		goto err;
 	}
 	dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint;
+	dbgp_endpoint_in = dbgp_desc.bDebugInEndpoint;
 
 	/* Move the device to 127 if it isn't already there */
 	if (devnum != USB_DEBUG_DEVNUM) {
@@ -968,8 +979,9 @@
 	if (!ehci_debug)
 		return 0;
 
-	if (early_dbgp_console.index != -1 &&
-		!(early_dbgp_console.flags & CON_BOOT))
+	if ((early_dbgp_console.index != -1 &&
+	     !(early_dbgp_console.flags & CON_BOOT)) ||
+	    dbgp_kgdb_mode)
 		return 1;
 	/* This means the console is not initialized, or should get
 	 * shutdown so as to allow for reuse of the usb device, which
@@ -982,3 +994,93 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dbgp_reset_prep);
+
+#ifdef CONFIG_KGDB
+
+static char kgdbdbgp_buf[DBGP_MAX_PACKET];
+static int kgdbdbgp_buf_sz;
+static int kgdbdbgp_buf_idx;
+static int kgdbdbgp_loop_cnt = DBGP_LOOPS;
+
+static int kgdbdbgp_read_char(void)
+{
+	int ret;
+
+	if (kgdbdbgp_buf_idx < kgdbdbgp_buf_sz) {
+		char ch = kgdbdbgp_buf[kgdbdbgp_buf_idx++];
+		return ch;
+	}
+
+	ret = dbgp_bulk_read(USB_DEBUG_DEVNUM, dbgp_endpoint_in,
+			     &kgdbdbgp_buf, DBGP_MAX_PACKET,
+			     kgdbdbgp_loop_cnt);
+	if (ret <= 0)
+		return NO_POLL_CHAR;
+	kgdbdbgp_buf_sz = ret;
+	kgdbdbgp_buf_idx = 1;
+	return kgdbdbgp_buf[0];
+}
+
+static void kgdbdbgp_write_char(u8 chr)
+{
+	early_dbgp_write(NULL, &chr, 1);
+}
+
+static struct kgdb_io kgdbdbgp_io_ops = {
+	.name = "kgdbdbgp",
+	.read_char = kgdbdbgp_read_char,
+	.write_char = kgdbdbgp_write_char,
+};
+
+static int kgdbdbgp_wait_time;
+
+static int __init kgdbdbgp_parse_config(char *str)
+{
+	char *ptr;
+
+	if (!ehci_debug) {
+		if (early_dbgp_init(str))
+			return -1;
+	}
+	ptr = strchr(str, ',');
+	if (ptr) {
+		ptr++;
+		kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
+	}
+	kgdb_register_io_module(&kgdbdbgp_io_ops);
+	kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
+
+	return 0;
+}
+early_param("kgdbdbgp", kgdbdbgp_parse_config);
+
+static int kgdbdbgp_reader_thread(void *ptr)
+{
+	int ret;
+
+	while (readl(&ehci_debug->control) & DBGP_ENABLED) {
+		kgdbdbgp_loop_cnt = 1;
+		ret = kgdbdbgp_read_char();
+		kgdbdbgp_loop_cnt = DBGP_LOOPS;
+		if (ret != NO_POLL_CHAR) {
+			if (ret == 0x3 || ret == '$') {
+				if (ret == '$')
+					kgdbdbgp_buf_idx--;
+				kgdb_breakpoint();
+			}
+			continue;
+		}
+		schedule_timeout_interruptible(kgdbdbgp_wait_time * HZ);
+	}
+	return 0;
+}
+
+static int __init kgdbdbgp_start_thread(void)
+{
+	if (dbgp_kgdb_mode && kgdbdbgp_wait_time)
+		kthread_run(kgdbdbgp_reader_thread, NULL, "%s", "dbgp");
+
+	return 0;
+}
+module_init(kgdbdbgp_start_thread);
+#endif /* CONFIG_KGDB */
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index fd55c27..1e6fec4 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2202,7 +2202,6 @@
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
-	default y
 
 config FB_MX3
 	tristate "MX3 Framebuffer support"
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 6c37e8e..3c1e13e 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -2099,7 +2099,7 @@
 }
 
 
-static ssize_t radeon_show_edid1(struct kobject *kobj,
+static ssize_t radeon_show_edid1(struct file *filp, struct kobject *kobj,
 				 struct bin_attribute *bin_attr,
 				 char *buf, loff_t off, size_t count)
 {
@@ -2112,7 +2112,7 @@
 }
 
 
-static ssize_t radeon_show_edid2(struct kobject *kobj,
+static ssize_t radeon_show_edid2(struct file *filp, struct kobject *kobj,
 				 struct bin_attribute *bin_attr,
 				 char *buf, loff_t off, size_t count)
 {
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index ecf4055..4a56f46af 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -168,7 +168,7 @@
 {
 	if (info->screen_base)
 		iounmap(info->screen_base);
-	release_mem_region(info->aperture_base, info->aperture_size);
+	release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
 	framebuffer_release(info);
 }
 
@@ -292,8 +292,13 @@
 	info->pseudo_palette = info->par;
 	info->par = NULL;
 
-	info->aperture_base = efifb_fix.smem_start;
-	info->aperture_size = size_remap;
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		err = -ENOMEM;
+		goto err_release_fb;
+	}
+	info->apertures->ranges[0].base = efifb_fix.smem_start;
+	info->apertures->ranges[0].size = size_remap;
 
 	info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
 	if (!info->screen_base) {
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index a15b44e..e08b7b5 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1468,16 +1468,67 @@
 	return 0;
 }
 
-static bool fb_do_apertures_overlap(struct fb_info *gen, struct fb_info *hw)
+static bool apertures_overlap(struct aperture *gen, struct aperture *hw)
 {
 	/* is the generic aperture base the same as the HW one */
-	if (gen->aperture_base == hw->aperture_base)
+	if (gen->base == hw->base)
 		return true;
 	/* is the generic aperture base inside the hw base->hw base+size */
-	if (gen->aperture_base > hw->aperture_base && gen->aperture_base <= hw->aperture_base + hw->aperture_size)
+	if (gen->base > hw->base && gen->base <= hw->base + hw->size)
 		return true;
 	return false;
 }
+
+static bool fb_do_apertures_overlap(struct apertures_struct *gena,
+				    struct apertures_struct *hwa)
+{
+	int i, j;
+	if (!hwa || !gena)
+		return false;
+
+	for (i = 0; i < hwa->count; ++i) {
+		struct aperture *h = &hwa->ranges[i];
+		for (j = 0; j < gena->count; ++j) {
+			struct aperture *g = &gena->ranges[j];
+			printk(KERN_DEBUG "checking generic (%llx %llx) vs hw (%llx %llx)\n",
+				g->base, g->size, h->base, h->size);
+			if (apertures_overlap(g, h))
+				return true;
+		}
+	}
+
+	return false;
+}
+
+#define VGA_FB_PHYS 0xA0000
+void remove_conflicting_framebuffers(struct apertures_struct *a,
+				     const char *name, bool primary)
+{
+	int i;
+
+	/* check all firmware fbs and kick off if the base addr overlaps */
+	for (i = 0 ; i < FB_MAX; i++) {
+		struct apertures_struct *gen_aper;
+		if (!registered_fb[i])
+			continue;
+
+		if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE))
+			continue;
+
+		gen_aper = registered_fb[i]->apertures;
+		if (fb_do_apertures_overlap(gen_aper, a) ||
+			(primary && gen_aper && gen_aper->count &&
+			 gen_aper->ranges[0].base == VGA_FB_PHYS)) {
+
+			printk(KERN_ERR "fb: conflicting fb hw usage "
+			       "%s vs %s - removing generic driver\n",
+			       name, registered_fb[i]->fix.id);
+			unregister_framebuffer(registered_fb[i]);
+		}
+	}
+}
+EXPORT_SYMBOL(remove_conflicting_framebuffers);
+
 /**
  *	register_framebuffer - registers a frame buffer device
  *	@fb_info: frame buffer info structure
@@ -1501,21 +1552,8 @@
 	if (fb_check_foreignness(fb_info))
 		return -ENOSYS;
 
-	/* check all firmware fbs and kick off if the base addr overlaps */
-	for (i = 0 ; i < FB_MAX; i++) {
-		if (!registered_fb[i])
-			continue;
-
-		if (registered_fb[i]->flags & FBINFO_MISC_FIRMWARE) {
-			if (fb_do_apertures_overlap(registered_fb[i], fb_info)) {
-				printk(KERN_ERR "fb: conflicting fb hw usage "
-				       "%s vs %s - removing generic driver\n",
-				       fb_info->fix.id,
-				       registered_fb[i]->fix.id);
-				unregister_framebuffer(registered_fb[i]);
-			}
-		}
-	}
+	remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
+					 fb_is_primary_device(fb_info));
 
 	num_registered_fb++;
 	for (i = 0 ; i < FB_MAX; i++)
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 81aa312..0a08f13 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -80,6 +80,7 @@
  */
 void framebuffer_release(struct fb_info *info)
 {
+	kfree(info->apertures);
 	kfree(info);
 }
 EXPORT_SYMBOL(framebuffer_release);
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 61f8b8f..46dda7d 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -285,7 +285,7 @@
 {
 	if (info->screen_base)
 		iounmap(info->screen_base);
-	release_mem_region(info->aperture_base, info->aperture_size);
+	release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
 	framebuffer_release(info);
 }
 
@@ -491,8 +491,11 @@
 	var->vmode = FB_VMODE_NONINTERLACED;
 
 	/* set offb aperture size for generic probing */
-	info->aperture_base = address;
-	info->aperture_size = fix->smem_len;
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures)
+		goto out_aper;
+	info->apertures->ranges[0].base = address;
+	info->apertures->ranges[0].size = fix->smem_len;
 
 	info->fbops = &offb_ops;
 	info->screen_base = ioremap(address, fix->smem_len);
@@ -501,17 +504,20 @@
 
 	fb_alloc_cmap(&info->cmap, 256, 0);
 
-	if (register_framebuffer(info) < 0) {
-		iounmap(par->cmap_adr);
-		par->cmap_adr = NULL;
-		iounmap(info->screen_base);
-		framebuffer_release(info);
-		release_mem_region(res_start, res_size);
-		return;
-	}
+	if (register_framebuffer(info) < 0)
+		goto out_err;
 
 	printk(KERN_INFO "fb%d: Open Firmware frame buffer device on %s\n",
 	       info->node, full_name);
+	return;
+
+out_err:
+	iounmap(info->screen_base);
+out_aper:
+	iounmap(par->cmap_adr);
+	par->cmap_adr = NULL;
+	framebuffer_release(info);
+	release_mem_region(res_start, res_size);
 }
 
 
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index dfb57ee..881c9f7 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -10,6 +10,7 @@
 config PANEL_SHARP_LS037V7DW01
         tristate "Sharp LS037V7DW01 LCD Panel"
         depends on OMAP2_DSS
+        select BACKLIGHT_CLASS_DEVICE
         help
           LCD Panel used in TI's SDP3430 and EVM boards
 
@@ -33,8 +34,14 @@
 
 config PANEL_TPO_TD043MTEA1
         tristate "TPO TD043MTEA1 LCD Panel"
-        depends on OMAP2_DSS && I2C
+        depends on OMAP2_DSS && SPI
         help
           LCD Panel used in OMAP3 Pandora
 
+config PANEL_ACX565AKM
+	tristate "ACX565AKM Panel"
+	depends on OMAP2_DSS_SDI
+	select BACKLIGHT_CLASS_DEVICE
+	help
+	  This is the LCD panel used on Nokia N900
 endmenu
diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
index e2bb321..aa38609 100644
--- a/drivers/video/omap2/displays/Makefile
+++ b/drivers/video/omap2/displays/Makefile
@@ -5,3 +5,4 @@
 obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
 obj-$(CONFIG_PANEL_TOPPOLY_TDO35S) += panel-toppoly-tdo35s.o
 obj-$(CONFIG_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
+obj-$(CONFIG_PANEL_ACX565AKM) += panel-acx565akm.o
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
new file mode 100644
index 0000000..1f8eb70
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -0,0 +1,819 @@
+/*
+ * Support for ACX565AKM LCD Panel used on Nokia N900
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Original Driver Author: Imre Deak <imre.deak@nokia.com>
+ * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Adapted to new DSS2 framework: Roger Quadros <roger.quadros@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+
+#include <plat/display.h>
+
+#define MIPID_CMD_READ_DISP_ID		0x04
+#define MIPID_CMD_READ_RED		0x06
+#define MIPID_CMD_READ_GREEN		0x07
+#define MIPID_CMD_READ_BLUE		0x08
+#define MIPID_CMD_READ_DISP_STATUS	0x09
+#define MIPID_CMD_RDDSDR		0x0F
+#define MIPID_CMD_SLEEP_IN		0x10
+#define MIPID_CMD_SLEEP_OUT		0x11
+#define MIPID_CMD_DISP_OFF		0x28
+#define MIPID_CMD_DISP_ON		0x29
+#define MIPID_CMD_WRITE_DISP_BRIGHTNESS	0x51
+#define MIPID_CMD_READ_DISP_BRIGHTNESS	0x52
+#define MIPID_CMD_WRITE_CTRL_DISP	0x53
+
+#define CTRL_DISP_BRIGHTNESS_CTRL_ON	(1 << 5)
+#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON	(1 << 4)
+#define CTRL_DISP_BACKLIGHT_ON		(1 << 2)
+#define CTRL_DISP_AUTO_BRIGHTNESS_ON	(1 << 1)
+
+#define MIPID_CMD_READ_CTRL_DISP	0x54
+#define MIPID_CMD_WRITE_CABC		0x55
+#define MIPID_CMD_READ_CABC		0x56
+
+#define MIPID_VER_LPH8923		3
+#define MIPID_VER_LS041Y3		4
+#define MIPID_VER_L4F00311		8
+#define MIPID_VER_ACX565AKM		9
+
+struct acx565akm_device {
+	char		*name;
+	int		enabled;
+	int		model;
+	int		revision;
+	u8		display_id[3];
+	unsigned	has_bc:1;
+	unsigned	has_cabc:1;
+	unsigned	cabc_mode;
+	unsigned long	hw_guard_end;		/* next value of jiffies
+						   when we can issue the
+						   next sleep in/out command */
+	unsigned long	hw_guard_wait;		/* max guard time in jiffies */
+
+	struct spi_device	*spi;
+	struct mutex		mutex;
+
+	struct omap_dss_device	*dssdev;
+	struct backlight_device *bl_dev;
+};
+
+static struct acx565akm_device acx_dev;
+static int acx565akm_bl_update_status(struct backlight_device *dev);
+
+/*--------------------MIPID interface-----------------------------*/
+
+static void acx565akm_transfer(struct acx565akm_device *md, int cmd,
+			      const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+{
+	struct spi_message	m;
+	struct spi_transfer	*x, xfer[5];
+	int			r;
+
+	BUG_ON(md->spi == NULL);
+
+	spi_message_init(&m);
+
+	memset(xfer, 0, sizeof(xfer));
+	x = &xfer[0];
+
+	cmd &=  0xff;
+	x->tx_buf = &cmd;
+	x->bits_per_word = 9;
+	x->len = 2;
+
+	if (rlen > 1 && wlen == 0) {
+		/*
+		 * Between the command and the response data there is a
+		 * dummy clock cycle. Add an extra bit after the command
+		 * word to account for this.
+		 */
+		x->bits_per_word = 10;
+		cmd <<= 1;
+	}
+	spi_message_add_tail(x, &m);
+
+	if (wlen) {
+		x++;
+		x->tx_buf = wbuf;
+		x->len = wlen;
+		x->bits_per_word = 9;
+		spi_message_add_tail(x, &m);
+	}
+
+	if (rlen) {
+		x++;
+		x->rx_buf	= rbuf;
+		x->len		= rlen;
+		spi_message_add_tail(x, &m);
+	}
+
+	r = spi_sync(md->spi, &m);
+	if (r < 0)
+		dev_dbg(&md->spi->dev, "spi_sync %d\n", r);
+}
+
+static inline void acx565akm_cmd(struct acx565akm_device *md, int cmd)
+{
+	acx565akm_transfer(md, cmd, NULL, 0, NULL, 0);
+}
+
+static inline void acx565akm_write(struct acx565akm_device *md,
+			       int reg, const u8 *buf, int len)
+{
+	acx565akm_transfer(md, reg, buf, len, NULL, 0);
+}
+
+static inline void acx565akm_read(struct acx565akm_device *md,
+			      int reg, u8 *buf, int len)
+{
+	acx565akm_transfer(md, reg, NULL, 0, buf, len);
+}
+
+static void hw_guard_start(struct acx565akm_device *md, int guard_msec)
+{
+	md->hw_guard_wait = msecs_to_jiffies(guard_msec);
+	md->hw_guard_end = jiffies + md->hw_guard_wait;
+}
+
+static void hw_guard_wait(struct acx565akm_device *md)
+{
+	unsigned long wait = md->hw_guard_end - jiffies;
+
+	if ((long)wait > 0 && wait <= md->hw_guard_wait) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(wait);
+	}
+}
+
+/*----------------------MIPID wrappers----------------------------*/
+
+static void set_sleep_mode(struct acx565akm_device *md, int on)
+{
+	int cmd;
+
+	if (on)
+		cmd = MIPID_CMD_SLEEP_IN;
+	else
+		cmd = MIPID_CMD_SLEEP_OUT;
+	/*
+	 * We have to keep 120msec between sleep in/out commands.
+	 * (8.2.15, 8.2.16).
+	 */
+	hw_guard_wait(md);
+	acx565akm_cmd(md, cmd);
+	hw_guard_start(md, 120);
+}
+
+static void set_display_state(struct acx565akm_device *md, int enabled)
+{
+	int cmd = enabled ? MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF;
+
+	acx565akm_cmd(md, cmd);
+}
+
+static int panel_enabled(struct acx565akm_device *md)
+{
+	u32 disp_status;
+	int enabled;
+
+	acx565akm_read(md, MIPID_CMD_READ_DISP_STATUS, (u8 *)&disp_status, 4);
+	disp_status = __be32_to_cpu(disp_status);
+	enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10));
+	dev_dbg(&md->spi->dev,
+		"LCD panel %senabled by bootloader (status 0x%04x)\n",
+		enabled ? "" : "not ", disp_status);
+	return enabled;
+}
+
+static int panel_detect(struct acx565akm_device *md)
+{
+	acx565akm_read(md, MIPID_CMD_READ_DISP_ID, md->display_id, 3);
+	dev_dbg(&md->spi->dev, "MIPI display ID: %02x%02x%02x\n",
+		md->display_id[0], md->display_id[1], md->display_id[2]);
+
+	switch (md->display_id[0]) {
+	case 0x10:
+		md->model = MIPID_VER_ACX565AKM;
+		md->name = "acx565akm";
+		md->has_bc = 1;
+		md->has_cabc = 1;
+		break;
+	case 0x29:
+		md->model = MIPID_VER_L4F00311;
+		md->name = "l4f00311";
+		break;
+	case 0x45:
+		md->model = MIPID_VER_LPH8923;
+		md->name = "lph8923";
+		break;
+	case 0x83:
+		md->model = MIPID_VER_LS041Y3;
+		md->name = "ls041y3";
+		break;
+	default:
+		md->name = "unknown";
+		dev_err(&md->spi->dev, "invalid display ID\n");
+		return -ENODEV;
+	}
+
+	md->revision = md->display_id[1];
+
+	dev_info(&md->spi->dev, "omapfb: %s rev %02x LCD detected\n",
+			md->name, md->revision);
+
+	return 0;
+}
+
+/*----------------------Backlight Control-------------------------*/
+
+static void enable_backlight_ctrl(struct acx565akm_device *md, int enable)
+{
+	u16 ctrl;
+
+	acx565akm_read(md, MIPID_CMD_READ_CTRL_DISP, (u8 *)&ctrl, 1);
+	if (enable) {
+		ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON |
+			CTRL_DISP_BACKLIGHT_ON;
+	} else {
+		ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON |
+			  CTRL_DISP_BACKLIGHT_ON);
+	}
+
+	ctrl |= 1 << 8;
+	acx565akm_write(md, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2);
+}
+
+static void set_cabc_mode(struct acx565akm_device *md, unsigned mode)
+{
+	u16 cabc_ctrl;
+
+	md->cabc_mode = mode;
+	if (!md->enabled)
+		return;
+	cabc_ctrl = 0;
+	acx565akm_read(md, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1);
+	cabc_ctrl &= ~3;
+	cabc_ctrl |= (1 << 8) | (mode & 3);
+	acx565akm_write(md, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
+}
+
+static unsigned get_cabc_mode(struct acx565akm_device *md)
+{
+	return md->cabc_mode;
+}
+
+static unsigned get_hw_cabc_mode(struct acx565akm_device *md)
+{
+	u8 cabc_ctrl;
+
+	acx565akm_read(md, MIPID_CMD_READ_CABC, &cabc_ctrl, 1);
+	return cabc_ctrl & 3;
+}
+
+static void acx565akm_set_brightness(struct acx565akm_device *md, int level)
+{
+	int bv;
+
+	bv = level | (1 << 8);
+	acx565akm_write(md, MIPID_CMD_WRITE_DISP_BRIGHTNESS, (u8 *)&bv, 2);
+
+	if (level)
+		enable_backlight_ctrl(md, 1);
+	else
+		enable_backlight_ctrl(md, 0);
+}
+
+static int acx565akm_get_actual_brightness(struct acx565akm_device *md)
+{
+	u8 bv;
+
+	acx565akm_read(md, MIPID_CMD_READ_DISP_BRIGHTNESS, &bv, 1);
+
+	return bv;
+}
+
+
+static int acx565akm_bl_update_status(struct backlight_device *dev)
+{
+	struct acx565akm_device *md = dev_get_drvdata(&dev->dev);
+	int r;
+	int level;
+
+	dev_dbg(&md->spi->dev, "%s\n", __func__);
+
+	mutex_lock(&md->mutex);
+
+	if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+			dev->props.power == FB_BLANK_UNBLANK)
+		level = dev->props.brightness;
+	else
+		level = 0;
+
+	r = 0;
+	if (md->has_bc)
+		acx565akm_set_brightness(md, level);
+	else if (md->dssdev->set_backlight)
+		r = md->dssdev->set_backlight(md->dssdev, level);
+	else
+		r = -ENODEV;
+
+	mutex_unlock(&md->mutex);
+
+	return r;
+}
+
+static int acx565akm_bl_get_intensity(struct backlight_device *dev)
+{
+	struct acx565akm_device *md = dev_get_drvdata(&dev->dev);
+
+	dev_dbg(&dev->dev, "%s\n", __func__);
+
+	if (!md->has_bc && md->dssdev->set_backlight == NULL)
+		return -ENODEV;
+
+	if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+			dev->props.power == FB_BLANK_UNBLANK) {
+		if (md->has_bc)
+			return acx565akm_get_actual_brightness(md);
+		else
+			return dev->props.brightness;
+	}
+
+	return 0;
+}
+
+static const struct backlight_ops acx565akm_bl_ops = {
+	.get_brightness = acx565akm_bl_get_intensity,
+	.update_status  = acx565akm_bl_update_status,
+};
+
+/*--------------------Auto Brightness control via Sysfs---------------------*/
+
+static const char *cabc_modes[] = {
+	"off",		/* always used when CABC is not supported */
+	"ui",
+	"still-image",
+	"moving-image",
+};
+
+static ssize_t show_cabc_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct acx565akm_device *md = dev_get_drvdata(dev);
+	const char *mode_str;
+	int mode;
+	int len;
+
+	if (!md->has_cabc)
+		mode = 0;
+	else
+		mode = get_cabc_mode(md);
+	mode_str = "unknown";
+	if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes))
+		mode_str = cabc_modes[mode];
+	len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str);
+
+	return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1;
+}
+
+static ssize_t store_cabc_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct acx565akm_device *md = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
+		const char *mode_str = cabc_modes[i];
+		int cmp_len = strlen(mode_str);
+
+		if (count > 0 && buf[count - 1] == '\n')
+			count--;
+		if (count != cmp_len)
+			continue;
+
+		if (strncmp(buf, mode_str, cmp_len) == 0)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(cabc_modes))
+		return -EINVAL;
+
+	if (!md->has_cabc && i != 0)
+		return -EINVAL;
+
+	mutex_lock(&md->mutex);
+	set_cabc_mode(md, i);
+	mutex_unlock(&md->mutex);
+
+	return count;
+}
+
+static ssize_t show_cabc_available_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct acx565akm_device *md = dev_get_drvdata(dev);
+	int len;
+	int i;
+
+	if (!md->has_cabc)
+		return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]);
+
+	for (i = 0, len = 0;
+	     len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
+		len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s",
+			i ? " " : "", cabc_modes[i],
+			i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : "");
+
+	return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
+}
+
+static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
+		show_cabc_mode, store_cabc_mode);
+static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
+		show_cabc_available_modes, NULL);
+
+static struct attribute *bldev_attrs[] = {
+	&dev_attr_cabc_mode.attr,
+	&dev_attr_cabc_available_modes.attr,
+	NULL,
+};
+
+static struct attribute_group bldev_attr_group = {
+	.attrs = bldev_attrs,
+};
+
+
+/*---------------------------ACX Panel----------------------------*/
+
+static int acx_get_recommended_bpp(struct omap_dss_device *dssdev)
+{
+	return 16;
+}
+
+static struct omap_video_timings acx_panel_timings = {
+	.x_res		= 800,
+	.y_res		= 480,
+	.pixel_clock	= 24000,
+	.hfp		= 28,
+	.hsw		= 4,
+	.hbp		= 24,
+	.vfp		= 3,
+	.vsw		= 3,
+	.vbp		= 4,
+};
+
+static int acx_panel_probe(struct omap_dss_device *dssdev)
+{
+	int r;
+	struct acx565akm_device *md = &acx_dev;
+	struct backlight_device *bldev;
+	int max_brightness, brightness;
+	struct backlight_properties props;
+
+	dev_dbg(&dssdev->dev, "%s\n", __func__);
+	dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+					OMAP_DSS_LCD_IHS;
+	/* FIXME AC bias ? */
+	dssdev->panel.timings = acx_panel_timings;
+
+	if (dssdev->platform_enable)
+		dssdev->platform_enable(dssdev);
+	/*
+	 * After reset we have to wait 5 msec before the first
+	 * command can be sent.
+	 */
+	msleep(5);
+
+	md->enabled = panel_enabled(md);
+
+	r = panel_detect(md);
+	if (r) {
+		dev_err(&dssdev->dev, "%s panel detect error\n", __func__);
+		if (!md->enabled && dssdev->platform_disable)
+			dssdev->platform_disable(dssdev);
+		return r;
+	}
+
+	mutex_lock(&acx_dev.mutex);
+	acx_dev.dssdev = dssdev;
+	mutex_unlock(&acx_dev.mutex);
+
+	if (!md->enabled) {
+		if (dssdev->platform_disable)
+			dssdev->platform_disable(dssdev);
+	}
+
+	/*------- Backlight control --------*/
+
+	props.fb_blank = FB_BLANK_UNBLANK;
+	props.power = FB_BLANK_UNBLANK;
+
+	bldev = backlight_device_register("acx565akm", &md->spi->dev,
+			md, &acx565akm_bl_ops, &props);
+	md->bl_dev = bldev;
+	if (md->has_cabc) {
+		r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group);
+		if (r) {
+			dev_err(&bldev->dev,
+				"%s failed to create sysfs files\n", __func__);
+			backlight_device_unregister(bldev);
+			return r;
+		}
+		md->cabc_mode = get_hw_cabc_mode(md);
+	}
+
+	if (md->has_bc)
+		max_brightness = 255;
+	else
+		max_brightness = dssdev->max_backlight_level;
+
+	if (md->has_bc)
+		brightness = acx565akm_get_actual_brightness(md);
+	else if (dssdev->get_backlight)
+		brightness = dssdev->get_backlight(dssdev);
+	else
+		brightness = 0;
+
+	bldev->props.max_brightness = max_brightness;
+	bldev->props.brightness = brightness;
+
+	acx565akm_bl_update_status(bldev);
+	return 0;
+}
+
+static void acx_panel_remove(struct omap_dss_device *dssdev)
+{
+	struct acx565akm_device *md = &acx_dev;
+
+	dev_dbg(&dssdev->dev, "%s\n", __func__);
+	sysfs_remove_group(&md->bl_dev->dev.kobj, &bldev_attr_group);
+	backlight_device_unregister(md->bl_dev);
+	mutex_lock(&acx_dev.mutex);
+	acx_dev.dssdev = NULL;
+	mutex_unlock(&acx_dev.mutex);
+}
+
+static int acx_panel_power_on(struct omap_dss_device *dssdev)
+{
+	struct acx565akm_device *md = &acx_dev;
+	int r;
+
+	dev_dbg(&dssdev->dev, "%s\n", __func__);
+
+	mutex_lock(&md->mutex);
+
+	r = omapdss_sdi_display_enable(dssdev);
+	if (r) {
+		pr_err("%s sdi enable failed\n", __func__);
+		return r;
+	}
+
+	/*FIXME tweak me */
+	msleep(50);
+
+	if (dssdev->platform_enable) {
+		r = dssdev->platform_enable(dssdev);
+		if (r)
+			goto fail;
+	}
+
+	if (md->enabled) {
+		dev_dbg(&md->spi->dev, "panel already enabled\n");
+		mutex_unlock(&md->mutex);
+		return 0;
+	}
+
+	/*
+	 * We have to meet all the following delay requirements:
+	 * 1. tRW: reset pulse width 10usec (7.12.1)
+	 * 2. tRT: reset cancel time 5msec (7.12.1)
+	 * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst
+	 *    case (7.6.2)
+	 * 4. 120msec before the sleep out command (7.12.1)
+	 */
+	msleep(120);
+
+	set_sleep_mode(md, 0);
+	md->enabled = 1;
+
+	/* 5msec between sleep out and the next command. (8.2.16) */
+	msleep(5);
+	set_display_state(md, 1);
+	set_cabc_mode(md, md->cabc_mode);
+
+	mutex_unlock(&md->mutex);
+
+	return acx565akm_bl_update_status(md->bl_dev);
+fail:
+	omapdss_sdi_display_disable(dssdev);
+	return r;
+}
+
+static void acx_panel_power_off(struct omap_dss_device *dssdev)
+{
+	struct acx565akm_device *md = &acx_dev;
+
+	dev_dbg(&dssdev->dev, "%s\n", __func__);
+
+	mutex_lock(&md->mutex);
+
+	if (!md->enabled) {
+		mutex_unlock(&md->mutex);
+		return;
+	}
+	set_display_state(md, 0);
+	set_sleep_mode(md, 1);
+	md->enabled = 0;
+	/*
+	 * We have to provide PCLK,HS,VS signals for 2 frames (worst case
+	 * ~50msec) after sending the sleep in command and asserting the
+	 * reset signal. We probably could assert the reset w/o the delay
+	 * but we still delay to avoid possible artifacts. (7.6.1)
+	 */
+	msleep(50);
+
+	if (dssdev->platform_disable)
+		dssdev->platform_disable(dssdev);
+
+	/* FIXME need to tweak this delay */
+	msleep(100);
+
+	omapdss_sdi_display_disable(dssdev);
+
+	mutex_unlock(&md->mutex);
+}
+
+static int acx_panel_enable(struct omap_dss_device *dssdev)
+{
+	int r;
+
+	dev_dbg(&dssdev->dev, "%s\n", __func__);
+	r = acx_panel_power_on(dssdev);
+
+	if (r)
+		return r;
+
+	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+	return 0;
+}
+
+static void acx_panel_disable(struct omap_dss_device *dssdev)
+{
+	dev_dbg(&dssdev->dev, "%s\n", __func__);
+	acx_panel_power_off(dssdev);
+	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static int acx_panel_suspend(struct omap_dss_device *dssdev)
+{
+	dev_dbg(&dssdev->dev, "%s\n", __func__);
+	acx_panel_power_off(dssdev);
+	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+	return 0;
+}
+
+static int acx_panel_resume(struct omap_dss_device *dssdev)
+{
+	int r;
+
+	dev_dbg(&dssdev->dev, "%s\n", __func__);
+	r = acx_panel_power_on(dssdev);
+	if (r)
+		return r;
+
+	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+	return 0;
+}
+
+static void acx_panel_set_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	int r;
+
+	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+		omapdss_sdi_display_disable(dssdev);
+
+	dssdev->panel.timings = *timings;
+
+	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+		r = omapdss_sdi_display_enable(dssdev);
+		if (r)
+			dev_err(&dssdev->dev, "%s enable failed\n", __func__);
+	}
+}
+
+static void acx_panel_get_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	*timings = dssdev->panel.timings;
+}
+
+static int acx_panel_check_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	return 0;
+}
+
+
+static struct omap_dss_driver acx_panel_driver = {
+	.probe		= acx_panel_probe,
+	.remove		= acx_panel_remove,
+
+	.enable		= acx_panel_enable,
+	.disable	= acx_panel_disable,
+	.suspend	= acx_panel_suspend,
+	.resume		= acx_panel_resume,
+
+	.set_timings	= acx_panel_set_timings,
+	.get_timings	= acx_panel_get_timings,
+	.check_timings	= acx_panel_check_timings,
+
+	.get_recommended_bpp = acx_get_recommended_bpp,
+
+	.driver         = {
+		.name   = "panel-acx565akm",
+		.owner  = THIS_MODULE,
+	},
+};
+
+/*--------------------SPI probe-------------------------*/
+
+static int acx565akm_spi_probe(struct spi_device *spi)
+{
+	struct acx565akm_device *md = &acx_dev;
+
+	dev_dbg(&spi->dev, "%s\n", __func__);
+
+	spi->mode = SPI_MODE_3;
+	md->spi = spi;
+	mutex_init(&md->mutex);
+	dev_set_drvdata(&spi->dev, md);
+
+	omap_dss_register_driver(&acx_panel_driver);
+
+	return 0;
+}
+
+static int acx565akm_spi_remove(struct spi_device *spi)
+{
+	struct acx565akm_device *md = dev_get_drvdata(&spi->dev);
+
+	dev_dbg(&md->spi->dev, "%s\n", __func__);
+	omap_dss_unregister_driver(&acx_panel_driver);
+
+	return 0;
+}
+
+static struct spi_driver acx565akm_spi_driver = {
+	.driver = {
+		.name	= "acx565akm",
+		.bus	= &spi_bus_type,
+		.owner	= THIS_MODULE,
+	},
+	.probe	= acx565akm_spi_probe,
+	.remove	= __devexit_p(acx565akm_spi_remove),
+};
+
+static int __init acx565akm_init(void)
+{
+	return spi_register_driver(&acx565akm_spi_driver);
+}
+
+static void __exit acx565akm_exit(void)
+{
+	spi_unregister_driver(&acx565akm_spi_driver);
+}
+
+module_init(acx565akm_init);
+module_exit(acx565akm_exit);
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("acx565akm LCD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
index 8d51a5e..7d9eb2b 100644
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -20,10 +20,17 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 
 #include <plat/display.h>
 
+struct sharp_data {
+	struct backlight_device *bl;
+};
+
 static struct omap_video_timings sharp_ls_timings = {
 	.x_res = 480,
 	.y_res = 640,
@@ -39,18 +46,89 @@
 	.vbp		= 1,
 };
 
+static int sharp_ls_bl_update_status(struct backlight_device *bl)
+{
+	struct omap_dss_device *dssdev = dev_get_drvdata(&bl->dev);
+	int level;
+
+	if (!dssdev->set_backlight)
+		return -EINVAL;
+
+	if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
+			bl->props.power == FB_BLANK_UNBLANK)
+		level = bl->props.brightness;
+	else
+		level = 0;
+
+	return dssdev->set_backlight(dssdev, level);
+}
+
+static int sharp_ls_bl_get_brightness(struct backlight_device *bl)
+{
+	if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
+			bl->props.power == FB_BLANK_UNBLANK)
+		return bl->props.brightness;
+
+	return 0;
+}
+
+static const struct backlight_ops sharp_ls_bl_ops = {
+	.get_brightness = sharp_ls_bl_get_brightness,
+	.update_status  = sharp_ls_bl_update_status,
+};
+
+
+
 static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
 {
+	struct backlight_properties props;
+	struct backlight_device *bl;
+	struct sharp_data *sd;
+	int r;
+
 	dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
 		OMAP_DSS_LCD_IHS;
 	dssdev->panel.acb = 0x28;
 	dssdev->panel.timings = sharp_ls_timings;
 
+	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+	if (!sd)
+		return -ENOMEM;
+
+	dev_set_drvdata(&dssdev->dev, sd);
+
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = dssdev->max_backlight_level;
+
+	bl = backlight_device_register("sharp-ls", &dssdev->dev, dssdev,
+			&sharp_ls_bl_ops, &props);
+	if (IS_ERR(bl)) {
+		r = PTR_ERR(bl);
+		kfree(sd);
+		return r;
+	}
+	sd->bl = bl;
+
+	bl->props.fb_blank = FB_BLANK_UNBLANK;
+	bl->props.power = FB_BLANK_UNBLANK;
+	bl->props.brightness = dssdev->max_backlight_level;
+	r = sharp_ls_bl_update_status(bl);
+	if (r < 0)
+		dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+
 	return 0;
 }
 
 static void sharp_ls_panel_remove(struct omap_dss_device *dssdev)
 {
+	struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
+	struct backlight_device *bl = sd->bl;
+
+	bl->props.power = FB_BLANK_POWERDOWN;
+	sharp_ls_bl_update_status(bl);
+	backlight_device_unregister(bl);
+
+	kfree(sd);
 }
 
 static int sharp_ls_power_on(struct omap_dss_device *dssdev)
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 4f3988a..aaf5d30 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -31,6 +31,7 @@
 #include <linux/completion.h>
 #include <linux/workqueue.h>
 #include <linux/slab.h>
+#include <linux/mutex.h>
 
 #include <plat/display.h>
 
@@ -67,6 +68,8 @@
 static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
 
 struct taal_data {
+	struct mutex lock;
+
 	struct backlight_device *bldev;
 
 	unsigned long	hw_guard_end;	/* next value of jiffies when we can
@@ -510,6 +513,8 @@
 	}
 	td->dssdev = dssdev;
 
+	mutex_init(&td->lock);
+
 	td->esd_wq = create_singlethread_workqueue("taal_esd");
 	if (td->esd_wq == NULL) {
 		dev_err(&dssdev->dev, "can't create ESD workqueue\n");
@@ -697,10 +702,9 @@
 
 	return 0;
 err:
-	dsi_bus_unlock();
-
 	omapdss_dsi_display_disable(dssdev);
 err0:
+	dsi_bus_unlock();
 	if (dssdev->platform_disable)
 		dssdev->platform_disable(dssdev);
 
@@ -733,54 +737,96 @@
 
 static int taal_enable(struct omap_dss_device *dssdev)
 {
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
 	int r;
+
 	dev_dbg(&dssdev->dev, "enable\n");
 
-	if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
-		return -EINVAL;
+	mutex_lock(&td->lock);
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+		r = -EINVAL;
+		goto err;
+	}
 
 	r = taal_power_on(dssdev);
 	if (r)
-		return r;
+		goto err;
 
 	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
 
+	mutex_unlock(&td->lock);
+
+	return 0;
+err:
+	dev_dbg(&dssdev->dev, "enable failed\n");
+	mutex_unlock(&td->lock);
 	return r;
 }
 
 static void taal_disable(struct omap_dss_device *dssdev)
 {
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+
 	dev_dbg(&dssdev->dev, "disable\n");
 
+	mutex_lock(&td->lock);
+
 	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
 		taal_power_off(dssdev);
 
 	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+	mutex_unlock(&td->lock);
 }
 
 static int taal_suspend(struct omap_dss_device *dssdev)
 {
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	int r;
+
 	dev_dbg(&dssdev->dev, "suspend\n");
 
-	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
-		return -EINVAL;
+	mutex_lock(&td->lock);
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
+		r = -EINVAL;
+		goto err;
+	}
 
 	taal_power_off(dssdev);
 	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
 
+	mutex_unlock(&td->lock);
+
 	return 0;
+err:
+	mutex_unlock(&td->lock);
+	return r;
 }
 
 static int taal_resume(struct omap_dss_device *dssdev)
 {
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
 	int r;
+
 	dev_dbg(&dssdev->dev, "resume\n");
 
-	if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED)
-		return -EINVAL;
+	mutex_lock(&td->lock);
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
+		r = -EINVAL;
+		goto err;
+	}
 
 	r = taal_power_on(dssdev);
 	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+	mutex_unlock(&td->lock);
+
+	return r;
+err:
+	mutex_unlock(&td->lock);
 	return r;
 }
 
@@ -799,6 +845,7 @@
 
 	dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
 
+	mutex_lock(&td->lock);
 	dsi_bus_lock();
 
 	if (!td->enabled) {
@@ -820,18 +867,24 @@
 		goto err;
 
 	/* note: no bus_unlock here. unlock is in framedone_cb */
+	mutex_unlock(&td->lock);
 	return 0;
 err:
 	dsi_bus_unlock();
+	mutex_unlock(&td->lock);
 	return r;
 }
 
 static int taal_sync(struct omap_dss_device *dssdev)
 {
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+
 	dev_dbg(&dssdev->dev, "sync\n");
 
+	mutex_lock(&td->lock);
 	dsi_bus_lock();
 	dsi_bus_unlock();
+	mutex_unlock(&td->lock);
 
 	dev_dbg(&dssdev->dev, "sync done\n");
 
@@ -861,13 +914,16 @@
 
 static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
 {
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
 	int r;
 
+	mutex_lock(&td->lock);
 	dsi_bus_lock();
 
 	r = _taal_enable_te(dssdev, enable);
 
 	dsi_bus_unlock();
+	mutex_unlock(&td->lock);
 
 	return r;
 }
@@ -875,7 +931,13 @@
 static int taal_get_te(struct omap_dss_device *dssdev)
 {
 	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-	return td->te_enabled;
+	int r;
+
+	mutex_lock(&td->lock);
+	r = td->te_enabled;
+	mutex_unlock(&td->lock);
+
+	return r;
 }
 
 static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
@@ -885,6 +947,7 @@
 
 	dev_dbg(&dssdev->dev, "rotate %d\n", rotate);
 
+	mutex_lock(&td->lock);
 	dsi_bus_lock();
 
 	if (td->enabled) {
@@ -896,16 +959,24 @@
 	td->rotate = rotate;
 
 	dsi_bus_unlock();
+	mutex_unlock(&td->lock);
 	return 0;
 err:
 	dsi_bus_unlock();
+	mutex_unlock(&td->lock);
 	return r;
 }
 
 static u8 taal_get_rotate(struct omap_dss_device *dssdev)
 {
 	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-	return td->rotate;
+	int r;
+
+	mutex_lock(&td->lock);
+	r = td->rotate;
+	mutex_unlock(&td->lock);
+
+	return r;
 }
 
 static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
@@ -915,6 +986,7 @@
 
 	dev_dbg(&dssdev->dev, "mirror %d\n", enable);
 
+	mutex_lock(&td->lock);
 	dsi_bus_lock();
 	if (td->enabled) {
 		r = taal_set_addr_mode(td->rotate, enable);
@@ -925,23 +997,33 @@
 	td->mirror = enable;
 
 	dsi_bus_unlock();
+	mutex_unlock(&td->lock);
 	return 0;
 err:
 	dsi_bus_unlock();
+	mutex_unlock(&td->lock);
 	return r;
 }
 
 static bool taal_get_mirror(struct omap_dss_device *dssdev)
 {
 	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-	return td->mirror;
+	int r;
+
+	mutex_lock(&td->lock);
+	r = td->mirror;
+	mutex_unlock(&td->lock);
+
+	return r;
 }
 
 static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
 {
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
 	u8 id1, id2, id3;
 	int r;
 
+	mutex_lock(&td->lock);
 	dsi_bus_lock();
 
 	r = taal_dcs_read_1(DCS_GET_ID1, &id1);
@@ -955,9 +1037,11 @@
 		goto err;
 
 	dsi_bus_unlock();
+	mutex_unlock(&td->lock);
 	return 0;
 err:
 	dsi_bus_unlock();
+	mutex_unlock(&td->lock);
 	return r;
 }
 
@@ -971,12 +1055,16 @@
 	unsigned buf_used = 0;
 	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
 
-	if (!td->enabled)
-		return -ENODEV;
-
 	if (size < w * h * 3)
 		return -ENOMEM;
 
+	mutex_lock(&td->lock);
+
+	if (!td->enabled) {
+		r = -ENODEV;
+		goto err1;
+	}
+
 	size = min(w * h * 3,
 			dssdev->panel.timings.x_res *
 			dssdev->panel.timings.y_res * 3);
@@ -995,7 +1083,7 @@
 
 	r = dsi_vc_set_max_rx_packet_size(TCH, plen);
 	if (r)
-		goto err0;
+		goto err2;
 
 	while (buf_used < size) {
 		u8 dcs_cmd = first ? 0x2e : 0x3e;
@@ -1006,7 +1094,7 @@
 
 		if (r < 0) {
 			dev_err(&dssdev->dev, "read error\n");
-			goto err;
+			goto err3;
 		}
 
 		buf_used += r;
@@ -1020,16 +1108,18 @@
 			dev_err(&dssdev->dev, "signal pending, "
 					"aborting memory read\n");
 			r = -ERESTARTSYS;
-			goto err;
+			goto err3;
 		}
 	}
 
 	r = buf_used;
 
-err:
+err3:
 	dsi_vc_set_max_rx_packet_size(TCH, 1);
-err0:
+err2:
 	dsi_bus_unlock();
+err1:
+	mutex_unlock(&td->lock);
 	return r;
 }
 
@@ -1041,8 +1131,12 @@
 	u8 state1, state2;
 	int r;
 
-	if (!td->enabled)
+	mutex_lock(&td->lock);
+
+	if (!td->enabled) {
+		mutex_unlock(&td->lock);
 		return;
+	}
 
 	dsi_bus_lock();
 
@@ -1084,16 +1178,19 @@
 
 	queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
 
+	mutex_unlock(&td->lock);
 	return;
 err:
 	dev_err(&dssdev->dev, "performing LCD reset\n");
 
-	taal_disable(dssdev);
-	taal_enable(dssdev);
+	taal_power_off(dssdev);
+	taal_power_on(dssdev);
 
 	dsi_bus_unlock();
 
 	queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
+
+	mutex_unlock(&td->lock);
 }
 
 static int taal_set_update_mode(struct omap_dss_device *dssdev,
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 87afb81..43b6440 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -36,6 +36,12 @@
 	  <debugfs>/omapdss/dispc_irq for DISPC interrupts, and
 	  <debugfs>/omapdss/dsi_irq for DSI interrupts.
 
+config OMAP2_DSS_DPI
+	bool "DPI support"
+	default y
+	help
+	  DPI Interface. This is the Parallel Display Interface.
+
 config OMAP2_DSS_RFBI
 	bool "RFBI support"
         default n
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index 980c72c..d71b5d9 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -1,5 +1,6 @@
 obj-$(CONFIG_OMAP2_DSS) += omapdss.o
-omapdss-y := core.o dss.o dispc.o dpi.o display.o manager.o overlay.o
+omapdss-y := core.o dss.o dispc.o display.o manager.o overlay.o
+omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
 omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
 omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
 omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 7ebe50b..b3a498f 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -482,6 +482,14 @@
 	if (dss_debugfs_dir)
 		debugfs_remove_recursive(dss_debugfs_dir);
 }
+#else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
+static inline int dss_initialize_debugfs(void)
+{
+	return 0;
+}
+static inline void dss_uninitialize_debugfs(void)
+{
+}
 #endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
 
 /* PLATFORM DEVICE */
@@ -499,7 +507,7 @@
 
 	r = dss_get_clocks();
 	if (r)
-		goto fail0;
+		goto err_clocks;
 
 	dss_clk_enable_all_no_ctx();
 
@@ -515,64 +523,64 @@
 	r = dss_init(skip_init);
 	if (r) {
 		DSSERR("Failed to initialize DSS\n");
-		goto fail0;
+		goto err_dss;
 	}
 
-#ifdef CONFIG_OMAP2_DSS_RFBI
 	r = rfbi_init();
 	if (r) {
 		DSSERR("Failed to initialize rfbi\n");
-		goto fail0;
+		goto err_rfbi;
 	}
-#endif
 
 	r = dpi_init(pdev);
 	if (r) {
 		DSSERR("Failed to initialize dpi\n");
-		goto fail0;
+		goto err_dpi;
 	}
 
 	r = dispc_init();
 	if (r) {
 		DSSERR("Failed to initialize dispc\n");
-		goto fail0;
+		goto err_dispc;
 	}
-#ifdef CONFIG_OMAP2_DSS_VENC
+
 	r = venc_init(pdev);
 	if (r) {
 		DSSERR("Failed to initialize venc\n");
-		goto fail0;
+		goto err_venc;
 	}
-#endif
+
 	if (cpu_is_omap34xx()) {
-#ifdef CONFIG_OMAP2_DSS_SDI
 		r = sdi_init(skip_init);
 		if (r) {
 			DSSERR("Failed to initialize SDI\n");
-			goto fail0;
+			goto err_sdi;
 		}
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
+
 		r = dsi_init(pdev);
 		if (r) {
 			DSSERR("Failed to initialize DSI\n");
-			goto fail0;
+			goto err_dsi;
 		}
-#endif
 	}
 
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
 	r = dss_initialize_debugfs();
 	if (r)
-		goto fail0;
-#endif
+		goto err_debugfs;
 
 	for (i = 0; i < pdata->num_devices; ++i) {
 		struct omap_dss_device *dssdev = pdata->devices[i];
 
 		r = omap_dss_register_device(dssdev);
-		if (r)
-			DSSERR("device reg failed %d\n", i);
+		if (r) {
+			DSSERR("device %d %s register failed %d\n", i,
+				dssdev->name ?: "unnamed", r);
+
+			while (--i >= 0)
+				omap_dss_unregister_device(pdata->devices[i]);
+
+			goto err_register;
+		}
 
 		if (def_disp_name && strcmp(def_disp_name, dssdev->name) == 0)
 			pdata->default_device = dssdev;
@@ -582,8 +590,29 @@
 
 	return 0;
 
-	/* XXX fail correctly */
-fail0:
+err_register:
+	dss_uninitialize_debugfs();
+err_debugfs:
+	if (cpu_is_omap34xx())
+		dsi_exit();
+err_dsi:
+	if (cpu_is_omap34xx())
+		sdi_exit();
+err_sdi:
+	venc_exit();
+err_venc:
+	dispc_exit();
+err_dispc:
+	dpi_exit();
+err_dpi:
+	rfbi_exit();
+err_rfbi:
+	dss_exit();
+err_dss:
+	dss_clk_disable_all_no_ctx();
+	dss_put_clocks();
+err_clocks:
+
 	return r;
 }
 
@@ -593,25 +622,15 @@
 	int i;
 	int c;
 
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
 	dss_uninitialize_debugfs();
-#endif
 
-#ifdef CONFIG_OMAP2_DSS_VENC
 	venc_exit();
-#endif
 	dispc_exit();
 	dpi_exit();
-#ifdef CONFIG_OMAP2_DSS_RFBI
 	rfbi_exit();
-#endif
 	if (cpu_is_omap34xx()) {
-#ifdef CONFIG_OMAP2_DSS_DSI
 		dsi_exit();
-#endif
-#ifdef CONFIG_OMAP2_DSS_SDI
 		sdi_exit();
-#endif
 	}
 
 	dss_exit();
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 6a74ea1..ef8c852 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -392,7 +392,9 @@
 	int r;
 
 	switch (dssdev->type) {
+#ifdef CONFIG_OMAP2_DSS_DPI
 	case OMAP_DISPLAY_TYPE_DPI:
+#endif
 #ifdef CONFIG_OMAP2_DSS_RFBI
 	case OMAP_DISPLAY_TYPE_DBI:
 #endif
@@ -413,9 +415,11 @@
 	}
 
 	switch (dssdev->type) {
+#ifdef CONFIG_OMAP2_DSS_DPI
 	case OMAP_DISPLAY_TYPE_DPI:
 		r = dpi_init_display(dssdev);
 		break;
+#endif
 #ifdef CONFIG_OMAP2_DSS_RFBI
 	case OMAP_DISPLAY_TYPE_DBI:
 		r = rfbi_init_display(dssdev);
@@ -541,7 +545,10 @@
 static int dss_disable_device(struct device *dev, void *data)
 {
 	struct omap_dss_device *dssdev = to_dss_device(dev);
-	dssdev->driver->disable(dssdev);
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
+		dssdev->driver->disable(dssdev);
+
 	return 0;
 }
 
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 5434418..24b1825 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -223,7 +223,13 @@
 
 	seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
 
-	seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
+	if (cpu_is_omap3630())
+		seq_printf(s, "dss1_alwon_fclk = %lu / %lu  = %lu\n",
+			dpll4_ck_rate,
+			dpll4_ck_rate / dpll4_m4_ck_rate,
+			dss_clk_get_rate(DSS_CLK_FCK1));
+	else
+		seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
 			dpll4_ck_rate,
 			dpll4_ck_rate / dpll4_m4_ck_rate,
 			dss_clk_get_rate(DSS_CLK_FCK1));
@@ -293,7 +299,8 @@
 {
 	unsigned long prate;
 
-	if (cinfo->fck_div > 16 || cinfo->fck_div == 0)
+	if (cinfo->fck_div > (cpu_is_omap3630() ? 32 : 16) ||
+						cinfo->fck_div == 0)
 		return -EINVAL;
 
 	prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
@@ -329,7 +336,10 @@
 	if (cpu_is_omap34xx()) {
 		unsigned long prate;
 		prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
-		cinfo->fck_div = prate / (cinfo->fck / 2);
+		if (cpu_is_omap3630())
+			cinfo->fck_div = prate / (cinfo->fck);
+		else
+			cinfo->fck_div = prate / (cinfo->fck / 2);
 	} else {
 		cinfo->fck_div = 0;
 	}
@@ -402,10 +412,14 @@
 
 		goto found;
 	} else if (cpu_is_omap34xx()) {
-		for (fck_div = 16; fck_div > 0; --fck_div) {
+		for (fck_div = (cpu_is_omap3630() ? 32 : 16);
+					fck_div > 0; --fck_div) {
 			struct dispc_clock_info cur_dispc;
 
-			fck = prate / fck_div * 2;
+			if (cpu_is_omap3630())
+				fck = prate / fck_div;
+			else
+				fck = prate / fck_div * 2;
 
 			if (fck > DISPC_MAX_FCK)
 				continue;
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 24326a5..786f433 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -242,11 +242,22 @@
 		struct dispc_clock_info *dispc_cinfo);
 
 /* SDI */
+#ifdef CONFIG_OMAP2_DSS_SDI
 int sdi_init(bool skip_init);
 void sdi_exit(void);
 int sdi_init_display(struct omap_dss_device *display);
+#else
+static inline int sdi_init(bool skip_init)
+{
+	return 0;
+}
+static inline void sdi_exit(void)
+{
+}
+#endif
 
 /* DSI */
+#ifdef CONFIG_OMAP2_DSS_DSI
 int dsi_init(struct platform_device *pdev);
 void dsi_exit(void);
 
@@ -270,11 +281,30 @@
 void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
 		u32 fifo_size, enum omap_burst_size *burst_size,
 		u32 *fifo_low, u32 *fifo_high);
+#else
+static inline int dsi_init(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline void dsi_exit(void)
+{
+}
+#endif
 
 /* DPI */
+#ifdef CONFIG_OMAP2_DSS_DPI
 int dpi_init(struct platform_device *pdev);
 void dpi_exit(void);
 int dpi_init_display(struct omap_dss_device *dssdev);
+#else
+static inline int dpi_init(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline void dpi_exit(void)
+{
+}
+#endif
 
 /* DISPC */
 int dispc_init(void);
@@ -362,12 +392,23 @@
 
 
 /* VENC */
+#ifdef CONFIG_OMAP2_DSS_VENC
 int venc_init(struct platform_device *pdev);
 void venc_exit(void);
 void venc_dump_regs(struct seq_file *s);
 int venc_init_display(struct omap_dss_device *display);
+#else
+static inline int venc_init(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline void venc_exit(void)
+{
+}
+#endif
 
 /* RFBI */
+#ifdef CONFIG_OMAP2_DSS_RFBI
 int rfbi_init(void);
 void rfbi_exit(void);
 void rfbi_dump_regs(struct seq_file *s);
@@ -379,6 +420,15 @@
 void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
 unsigned long rfbi_get_max_tx_rate(void);
 int rfbi_init_display(struct omap_dss_device *display);
+#else
+static inline int rfbi_init(void)
+{
+	return 0;
+}
+static inline void rfbi_exit(void)
+{
+}
+#endif
 
 
 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 0820986..9e1fbe5 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -843,6 +843,7 @@
 
 	c = &dss_cache.manager_cache[channel];
 
+	dispc_set_default_color(channel, c->default_color);
 	dispc_set_trans_key(channel, c->trans_key_type, c->trans_key);
 	dispc_enable_trans_key(channel, c->trans_enabled);
 	dispc_enable_alpha_blending(channel, c->alpha_enabled);
@@ -940,6 +941,22 @@
 	return r;
 }
 
+/* Make the coordinates even. There are some strange problems with OMAP and
+ * partial DSI update when the update widths are odd. */
+static void make_even(u16 *x, u16 *w)
+{
+	u16 x1, x2;
+
+	x1 = *x;
+	x2 = *x + *w;
+
+	x1 &= ~1;
+	x2 = ALIGN(x2, 2);
+
+	*x = x1;
+	*w = x2 - x1;
+}
+
 /* Configure dispc for partial update. Return possibly modified update
  * area */
 void dss_setup_partial_planes(struct omap_dss_device *dssdev,
@@ -968,6 +985,8 @@
 		return;
 	}
 
+	make_even(&x, &w);
+
 	spin_lock_irqsave(&dss_cache.lock, flags);
 
 	/* We need to show the whole overlay if it is scaled. So look for
@@ -1029,6 +1048,8 @@
 		w = x2 - x1;
 		h = y2 - y1;
 
+		make_even(&x, &w);
+
 		DSSDBG("changing upd area due to ovl(%d) scaling %d,%d %dx%d\n",
 				i, x, y, w, h);
 	}
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 12eb404..ee07a3c 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -23,13 +23,16 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/err.h>
+#include <linux/regulator/consumer.h>
 
 #include <plat/display.h>
+#include <plat/cpu.h>
 #include "dss.h"
 
 static struct {
 	bool skip_init;
 	bool update_enabled;
+	struct regulator *vdds_sdi_reg;
 } sdi;
 
 static void sdi_basic_init(void)
@@ -57,6 +60,10 @@
 		goto err0;
 	}
 
+	r = regulator_enable(sdi.vdds_sdi_reg);
+	if (r)
+		goto err1;
+
 	/* In case of skip_init sdi_init has already enabled the clocks */
 	if (!sdi.skip_init)
 		dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
@@ -115,19 +122,12 @@
 
 	dssdev->manager->enable(dssdev->manager);
 
-	if (dssdev->driver->enable) {
-		r = dssdev->driver->enable(dssdev);
-		if (r)
-			goto err3;
-	}
-
 	sdi.skip_init = 0;
 
 	return 0;
-err3:
-	dssdev->manager->disable(dssdev->manager);
 err2:
 	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+	regulator_disable(sdi.vdds_sdi_reg);
 err1:
 	omap_dss_stop_device(dssdev);
 err0:
@@ -137,15 +137,14 @@
 
 void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
 {
-	if (dssdev->driver->disable)
-		dssdev->driver->disable(dssdev);
-
 	dssdev->manager->disable(dssdev->manager);
 
 	dss_sdi_disable();
 
 	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
 
+	regulator_disable(sdi.vdds_sdi_reg);
+
 	omap_dss_stop_device(dssdev);
 }
 EXPORT_SYMBOL(omapdss_sdi_display_disable);
@@ -162,6 +161,11 @@
 	/* we store this for first display enable, then clear it */
 	sdi.skip_init = skip_init;
 
+	sdi.vdds_sdi_reg = dss_get_vdds_sdi();
+	if (IS_ERR(sdi.vdds_sdi_reg)) {
+		DSSERR("can't get VDDS_SDI regulator\n");
+		return PTR_ERR(sdi.vdds_sdi_reg);
+	}
 	/*
 	 * Enable clocks already here, otherwise there would be a toggle
 	 * of them until sdi_display_enable is called.
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index f0ba573..eff3505 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -479,12 +479,6 @@
 		goto err1;
 	}
 
-	if (dssdev->platform_enable) {
-		r = dssdev->platform_enable(dssdev);
-		if (r)
-			goto err2;
-	}
-
 	venc_power_on(dssdev);
 
 	venc.wss_data = 0;
@@ -494,13 +488,9 @@
 	/* wait couple of vsyncs until enabling the LCD */
 	msleep(50);
 
-	mutex_unlock(&venc.venc_lock);
-
-	return r;
-err2:
-	venc_power_off(dssdev);
 err1:
 	mutex_unlock(&venc.venc_lock);
+
 	return r;
 }
 
@@ -524,9 +514,6 @@
 	/* wait at least 5 vsyncs after disabling the LCD */
 	msleep(100);
 
-	if (dssdev->platform_disable)
-		dssdev->platform_disable(dssdev);
-
 	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
 end:
 	mutex_unlock(&venc.venc_lock);
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 1ffa760..9c73618 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -183,13 +183,14 @@
 	struct omapfb2_device *fbdev = ofbi->fbdev;
 	int r;
 
+	if (!lock_fb_info(fbi))
+		return -ENODEV;
 	omapfb_lock(fbdev);
-	lock_fb_info(fbi);
 
 	r = omapfb_update_window_nolock(fbi, x, y, w, h);
 
-	unlock_fb_info(fbi);
 	omapfb_unlock(fbdev);
+	unlock_fb_info(fbi);
 
 	return r;
 }
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
index 62bb88f..5179219 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -57,7 +57,8 @@
 	if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB)
 		return -EINVAL;
 
-	lock_fb_info(fbi);
+	if (!lock_fb_info(fbi))
+		return -ENODEV;
 
 	r = 0;
 	if (rot_type == ofbi->rotation_type)
@@ -105,7 +106,8 @@
 	if (mirror != 0 && mirror != 1)
 		return -EINVAL;
 
-	lock_fb_info(fbi);
+	if (!lock_fb_info(fbi))
+		return -ENODEV;
 
 	ofbi->mirror = mirror;
 
@@ -137,8 +139,9 @@
 	ssize_t l = 0;
 	int t;
 
+	if (!lock_fb_info(fbi))
+		return -ENODEV;
 	omapfb_lock(fbdev);
-	lock_fb_info(fbi);
 
 	for (t = 0; t < ofbi->num_overlays; t++) {
 		struct omap_overlay *ovl = ofbi->overlays[t];
@@ -154,8 +157,8 @@
 
 	l += snprintf(buf + l, PAGE_SIZE - l, "\n");
 
-	unlock_fb_info(fbi);
 	omapfb_unlock(fbdev);
+	unlock_fb_info(fbi);
 
 	return l;
 }
@@ -195,8 +198,9 @@
 	if (buf[len - 1] == '\n')
 		len = len - 1;
 
+	if (!lock_fb_info(fbi))
+		return -ENODEV;
 	omapfb_lock(fbdev);
-	lock_fb_info(fbi);
 
 	if (len > 0) {
 		char *p = (char *)buf;
@@ -303,8 +307,8 @@
 
 	r = count;
 out:
-	unlock_fb_info(fbi);
 	omapfb_unlock(fbdev);
+	unlock_fb_info(fbi);
 
 	return r;
 }
@@ -317,7 +321,8 @@
 	ssize_t l = 0;
 	int t;
 
-	lock_fb_info(fbi);
+	if (!lock_fb_info(fbi))
+		return -ENODEV;
 
 	for (t = 0; t < ofbi->num_overlays; t++) {
 		l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
@@ -345,7 +350,8 @@
 	if (buf[len - 1] == '\n')
 		len = len - 1;
 
-	lock_fb_info(fbi);
+	if (!lock_fb_info(fbi))
+		return -ENODEV;
 
 	if (len > 0) {
 		char *p = (char *)buf;
@@ -416,7 +422,8 @@
 
 	size = PAGE_ALIGN(simple_strtoul(buf, NULL, 0));
 
-	lock_fb_info(fbi);
+	if (!lock_fb_info(fbi))
+		return -ENODEV;
 
 	for (i = 0; i < ofbi->num_overlays; i++) {
 		if (ofbi->overlays[i]->info.enabled) {
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index 0cadf7a..090aa1a 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -177,7 +177,7 @@
 {
 	if (info->screen_base)
 		iounmap(info->screen_base);
-	release_mem_region(info->aperture_base, info->aperture_size);
+	release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
 	framebuffer_release(info);
 }
 
@@ -295,8 +295,13 @@
 	info->par = NULL;
 
 	/* set vesafb aperture size for generic probing */
-	info->aperture_base = screen_info.lfb_base;
-	info->aperture_size = size_total;
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		err = -ENOMEM;
+		goto err;
+	}
+	info->apertures->ranges[0].base = screen_info.lfb_base;
+	info->apertures->ranges[0].size = size_total;
 
 	info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len);
 	if (!info->screen_base) {
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index bf638a4..149c47a 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -1263,10 +1263,19 @@
 		vga_imageblit_color(info, image);
 }
 
+static void vga16fb_destroy(struct fb_info *info)
+{
+	iounmap(info->screen_base);
+	fb_dealloc_cmap(&info->cmap);
+	/* XXX unshare VGA regions */
+	framebuffer_release(info);
+}
+
 static struct fb_ops vga16fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_open        = vga16fb_open,
 	.fb_release     = vga16fb_release,
+	.fb_destroy	= vga16fb_destroy,
 	.fb_check_var	= vga16fb_check_var,
 	.fb_set_par	= vga16fb_set_par,
 	.fb_setcolreg 	= vga16fb_setcolreg,
@@ -1306,6 +1315,11 @@
 		ret = -ENOMEM;
 		goto err_fb_alloc;
 	}
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto err_ioremap;
+	}
 
 	/* XXX share VGA_FB_PHYS and I/O region with vgacon and others */
 	info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS, 0);
@@ -1335,7 +1349,7 @@
 	info->fix = vga16fb_fix;
 	/* supports rectangles with widths of multiples of 8 */
 	info->pixmap.blit_x = 1 << 7 | 1 << 15 | 1 << 23 | 1 << 31;
-	info->flags = FBINFO_FLAG_DEFAULT |
+	info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
 		FBINFO_HWACCEL_YPAN;
 
 	i = (info->var.bits_per_pixel == 8) ? 256 : 16;
@@ -1354,6 +1368,9 @@
 
 	vga16fb_update_fix(info);
 
+	info->apertures->ranges[0].base = VGA_FB_PHYS;
+	info->apertures->ranges[0].size = VGA_FB_PHYS_LEN;
+
 	if (register_framebuffer(info) < 0) {
 		printk(KERN_ERR "vga16fb: unable to register framebuffer\n");
 		ret = -EINVAL;
@@ -1380,13 +1397,8 @@
 {
 	struct fb_info *info = platform_get_drvdata(dev);
 
-	if (info) {
+	if (info)
 		unregister_framebuffer(info);
-		iounmap(info->screen_base);
-		fb_dealloc_cmap(&info->cmap);
-	/* XXX unshare VGA regions */
-		framebuffer_release(info);
-	}
 
 	return 0;
 }
diff --git a/drivers/w1/slaves/w1_ds2431.c b/drivers/w1/slaves/w1_ds2431.c
index 2c6c0cf..84e2410 100644
--- a/drivers/w1/slaves/w1_ds2431.c
+++ b/drivers/w1/slaves/w1_ds2431.c
@@ -96,7 +96,7 @@
 	return -1;
 }
 
-static ssize_t w1_f2d_read_bin(struct kobject *kobj,
+static ssize_t w1_f2d_read_bin(struct file *filp, struct kobject *kobj,
 			       struct bin_attribute *bin_attr,
 			       char *buf, loff_t off, size_t count)
 {
@@ -202,7 +202,7 @@
 	return 0;
 }
 
-static ssize_t w1_f2d_write_bin(struct kobject *kobj,
+static ssize_t w1_f2d_write_bin(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *bin_attr,
 				char *buf, loff_t off, size_t count)
 {
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index d2bf321..0f7b8f9 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -92,7 +92,7 @@
 }
 #endif	/* CONFIG_W1_SLAVE_DS2433_CRC */
 
-static ssize_t w1_f23_read_bin(struct kobject *kobj,
+static ssize_t w1_f23_read_bin(struct file *filp, struct kobject *kobj,
 			       struct bin_attribute *bin_attr,
 			       char *buf, loff_t off, size_t count)
 {
@@ -206,7 +206,7 @@
 	return 0;
 }
 
-static ssize_t w1_f23_write_bin(struct kobject *kobj,
+static ssize_t w1_f23_write_bin(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *bin_attr,
 				char *buf, loff_t off, size_t count)
 {
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index 6e15334..483d451 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -97,7 +97,7 @@
 	return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_RECALL_DATA);
 }
 
-static ssize_t w1_ds2760_read_bin(struct kobject *kobj,
+static ssize_t w1_ds2760_read_bin(struct file *filp, struct kobject *kobj,
 				  struct bin_attribute *bin_attr,
 				  char *buf, loff_t off, size_t count)
 {
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index ad5897d..2839e28 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -120,7 +120,7 @@
 
 /* Default family */
 
-static ssize_t w1_default_write(struct kobject *kobj,
+static ssize_t w1_default_write(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *bin_attr,
 				char *buf, loff_t off, size_t count)
 {
@@ -139,7 +139,7 @@
 	return count;
 }
 
-static ssize_t w1_default_read(struct kobject *kobj,
+static ssize_t w1_default_read(struct file *filp, struct kobject *kobj,
 			       struct bin_attribute *bin_attr,
 			       char *buf, loff_t off, size_t count)
 {
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index eb924e0..26f7184 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -49,7 +49,7 @@
 
 static DEVICE_ATTR(resource, S_IRUGO, zorro_show_resource, NULL);
 
-static ssize_t zorro_read_config(struct kobject *kobj,
+static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
 				 struct bin_attribute *bin_attr,
 				 char *buf, loff_t off, size_t count)
 {
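
The w1 and zorro hunks above all track the same sysfs API change: binary attribute read/write callbacks now receive the opening struct file as their first argument. A hedged sketch of the updated prototype (the function name and buffer handling are made up for illustration):

	static ssize_t example_read_bin(struct file *filp, struct kobject *kobj,
					struct bin_attribute *bin_attr,
					char *buf, loff_t off, size_t count)
	{
		/* copy at most 'count' bytes into 'buf' starting at 'off'
		 * and return the number of bytes actually copied */
		return 0;
	}
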
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 6dcee88..55dcb78 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -417,7 +417,7 @@
 	 */
 	mutex_unlock(&bd_inode->i_mutex);
 
-	error = blkdev_issue_flush(bdev, NULL);
+	error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
 	if (error == -EOPNOTSUPP)
 		error = 0;
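
blkdev_issue_flush() gains a gfp mask and a flag word in this series; callers that must not return before the flush completes pass BLKDEV_IFL_WAIT. A hedged sketch of the updated call in an fsync-style path:

	/* flush the device write cache and wait for completion;
	 * -EOPNOTSUPP simply means there is no volatile cache to flush */
	error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
	if (error == -EOPNOTSUPP)
		error = 0;
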
 
@@ -668,41 +668,209 @@
 		iput(bdev->bd_inode);
 }
 
-int bd_claim(struct block_device *bdev, void *holder)
+/**
+ * bd_may_claim - test whether a block device can be claimed
+ * @bdev: block device of interest
+ * @whole: whole block device containing @bdev, may equal @bdev
+ * @holder: holder trying to claim @bdev
+ *
+ * Test whether @bdev can be claimed by @holder.
+ *
+ * CONTEXT:
+ * spin_lock(&bdev_lock).
+ *
+ * RETURNS:
+ * %true if @bdev can be claimed, %false otherwise.
+ */
+static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
+			 void *holder)
 {
-	int res;
+	if (bdev->bd_holder == holder)
+		return true;	 /* already a holder */
+	else if (bdev->bd_holder != NULL)
+		return false; 	 /* held by someone else */
+	else if (bdev->bd_contains == bdev)
+		return true;  	 /* is a whole device which isn't held */
+
+	else if (whole->bd_holder == bd_claim)
+		return true; 	 /* is a partition of a device that is being partitioned */
+	else if (whole->bd_holder != NULL)
+		return false;	 /* is a partition of a held device */
+	else
+		return true;	 /* is a partition of an un-held device */
+}
+
+/**
+ * bd_prepare_to_claim - prepare to claim a block device
+ * @bdev: block device of interest
+ * @whole: the whole device containing @bdev, may equal @bdev
+ * @holder: holder trying to claim @bdev
+ *
+ * Prepare to claim @bdev.  This function fails if @bdev is already
+ * claimed by another holder and waits if another claiming is in
+ * progress.  This function doesn't actually claim.  On successful
+ * return, the caller has ownership of bd_claiming and bd_holder[s].
+ *
+ * CONTEXT:
+ * spin_lock(&bdev_lock).  Might release bdev_lock, sleep and regrab
+ * it multiple times.
+ *
+ * RETURNS:
+ * 0 if @bdev can be claimed, -EBUSY otherwise.
+ */
+static int bd_prepare_to_claim(struct block_device *bdev,
+			       struct block_device *whole, void *holder)
+{
+retry:
+	/* if someone else claimed, fail */
+	if (!bd_may_claim(bdev, whole, holder))
+		return -EBUSY;
+
+	/* if someone else is claiming, wait for it to finish */
+	if (whole->bd_claiming && whole->bd_claiming != holder) {
+		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
+		DEFINE_WAIT(wait);
+
+		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+		spin_unlock(&bdev_lock);
+		schedule();
+		finish_wait(wq, &wait);
+		spin_lock(&bdev_lock);
+		goto retry;
+	}
+
+	/* yay, all mine */
+	return 0;
+}
+
+/**
+ * bd_start_claiming - start claiming a block device
+ * @bdev: block device of interest
+ * @holder: holder trying to claim @bdev
+ *
+ * @bdev is about to be opened exclusively.  Check @bdev can be opened
+ * exclusively and mark that an exclusive open is in progress.  Each
+ * successful call to this function must be matched with a call to
+ * either bd_claim() or bd_abort_claiming().  If this function
+ * succeeds, the matching bd_claim() is guaranteed to succeed.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * Pointer to the block device containing @bdev on success, ERR_PTR()
+ * value on failure.
+ */
+static struct block_device *bd_start_claiming(struct block_device *bdev,
+					      void *holder)
+{
+	struct gendisk *disk;
+	struct block_device *whole;
+	int partno, err;
+
+	might_sleep();
+
+	/*
+	 * @bdev might not have been initialized properly yet; look up
+	 * and grab the outer block device the hard way.
+	 */
+	disk = get_gendisk(bdev->bd_dev, &partno);
+	if (!disk)
+		return ERR_PTR(-ENXIO);
+
+	whole = bdget_disk(disk, 0);
+	put_disk(disk);
+	if (!whole)
+		return ERR_PTR(-ENOMEM);
+
+	/* prepare to claim, if successful, mark claiming in progress */
 	spin_lock(&bdev_lock);
 
-	/* first decide result */
-	if (bdev->bd_holder == holder)
-		res = 0;	 /* already a holder */
-	else if (bdev->bd_holder != NULL)
-		res = -EBUSY; 	 /* held by someone else */
-	else if (bdev->bd_contains == bdev)
-		res = 0;  	 /* is a whole device which isn't held */
+	err = bd_prepare_to_claim(bdev, whole, holder);
+	if (err == 0) {
+		whole->bd_claiming = holder;
+		spin_unlock(&bdev_lock);
+		return whole;
+	} else {
+		spin_unlock(&bdev_lock);
+		bdput(whole);
+		return ERR_PTR(err);
+	}
+}
 
-	else if (bdev->bd_contains->bd_holder == bd_claim)
-		res = 0; 	 /* is a partition of a device that is being partitioned */
-	else if (bdev->bd_contains->bd_holder != NULL)
-		res = -EBUSY;	 /* is a partition of a held device */
-	else
-		res = 0;	 /* is a partition of an un-held device */
+/* releases bdev_lock */
+static void __bd_abort_claiming(struct block_device *whole, void *holder)
+{
+	BUG_ON(whole->bd_claiming != holder);
+	whole->bd_claiming = NULL;
+	wake_up_bit(&whole->bd_claiming, 0);
 
-	/* now impose change */
-	if (res==0) {
+	spin_unlock(&bdev_lock);
+	bdput(whole);
+}
+
+/**
+ * bd_abort_claiming - abort claiming a block device
+ * @whole: whole block device returned by bd_start_claiming()
+ * @holder: holder trying to claim @bdev
+ *
+ * Abort a claiming block started by bd_start_claiming().  Note that
+ * @whole is not the block device to be claimed but the whole device
+ * returned by bd_start_claiming().
+ *
+ * CONTEXT:
+ * Grabs and releases bdev_lock.
+ */
+static void bd_abort_claiming(struct block_device *whole, void *holder)
+{
+	spin_lock(&bdev_lock);
+	__bd_abort_claiming(whole, holder);		/* releases bdev_lock */
+}
+
+/**
+ * bd_claim - claim a block device
+ * @bdev: block device to claim
+ * @holder: holder trying to claim @bdev
+ *
+ * Try to claim @bdev which must have been opened successfully.  This
+ * function may be called with or without preceding
+ * bd_start_claiming().  In the former case, this function is always
+ * successful and terminates the claiming block.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 if successful, -EBUSY if @bdev is already claimed.
+ */
+int bd_claim(struct block_device *bdev, void *holder)
+{
+	struct block_device *whole = bdev->bd_contains;
+	int res;
+
+	might_sleep();
+
+	spin_lock(&bdev_lock);
+
+	res = bd_prepare_to_claim(bdev, whole, holder);
+	if (res == 0) {
 		/* note that for a whole device bd_holders
 		 * will be incremented twice, and bd_holder will
 		 * be set to bd_claim before being set to holder
 		 */
-		bdev->bd_contains->bd_holders ++;
-		bdev->bd_contains->bd_holder = bd_claim;
+		whole->bd_holders++;
+		whole->bd_holder = bd_claim;
 		bdev->bd_holders++;
 		bdev->bd_holder = holder;
 	}
-	spin_unlock(&bdev_lock);
+
+	if (whole->bd_claiming)
+		__bd_abort_claiming(whole, holder);	/* releases bdev_lock */
+	else
+		spin_unlock(&bdev_lock);
+
 	return res;
 }
-
 EXPORT_SYMBOL(bd_claim);
 
 void bd_release(struct block_device *bdev)
@@ -1316,6 +1484,7 @@
 
 static int blkdev_open(struct inode * inode, struct file * filp)
 {
+	struct block_device *whole = NULL;
 	struct block_device *bdev;
 	int res;
 
@@ -1338,22 +1507,25 @@
 	if (bdev == NULL)
 		return -ENOMEM;
 
+	if (filp->f_mode & FMODE_EXCL) {
+		whole = bd_start_claiming(bdev, filp);
+		if (IS_ERR(whole)) {
+			bdput(bdev);
+			return PTR_ERR(whole);
+		}
+	}
+
 	filp->f_mapping = bdev->bd_inode->i_mapping;
 
 	res = blkdev_get(bdev, filp->f_mode);
-	if (res)
-		return res;
 
-	if (filp->f_mode & FMODE_EXCL) {
-		res = bd_claim(bdev, filp);
-		if (res)
-			goto out_blkdev_put;
+	if (whole) {
+		if (res == 0)
+			BUG_ON(bd_claim(bdev, filp) != 0);
+		else
+			bd_abort_claiming(whole, filp);
 	}
 
-	return 0;
-
- out_blkdev_put:
-	blkdev_put(bdev, filp->f_mode);
 	return res;
 }
 
@@ -1564,27 +1736,34 @@
  */
 struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder)
 {
-	struct block_device *bdev;
-	int error = 0;
+	struct block_device *bdev, *whole;
+	int error;
 
 	bdev = lookup_bdev(path);
 	if (IS_ERR(bdev))
 		return bdev;
 
+	whole = bd_start_claiming(bdev, holder);
+	if (IS_ERR(whole)) {
+		bdput(bdev);
+		return whole;
+	}
+
 	error = blkdev_get(bdev, mode);
 	if (error)
-		return ERR_PTR(error);
+		goto out_abort_claiming;
+
 	error = -EACCES;
 	if ((mode & FMODE_WRITE) && bdev_read_only(bdev))
-		goto blkdev_put;
-	error = bd_claim(bdev, holder);
-	if (error)
-		goto blkdev_put;
+		goto out_blkdev_put;
 
+	BUG_ON(bd_claim(bdev, holder) != 0);
 	return bdev;
-	
-blkdev_put:
+
+out_blkdev_put:
 	blkdev_put(bdev, mode);
+out_abort_claiming:
+	bd_abort_claiming(whole, holder);
 	return ERR_PTR(error);
 }
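
Taken together, the block_dev.c changes split an exclusive open into a prepare/commit pair. A hedged sketch of the calling convention, condensed from the blkdev_open() and open_bdev_exclusive() rewrites above (error handling abbreviated):

	/* reserve the claim before the (possibly sleeping) open ... */
	whole = bd_start_claiming(bdev, holder);
	if (IS_ERR(whole))
		return PTR_ERR(whole);

	err = blkdev_get(bdev, mode);
	if (err) {
		bd_abort_claiming(whole, holder);	/* ... or back out */
		return err;
	}

	/* a successful bd_start_claiming() guarantees this cannot fail */
	BUG_ON(bd_claim(bdev, holder) != 0);
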
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b34d32f..c6a4f45 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1589,7 +1589,7 @@
 				u64 start, u64 len)
 {
 	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
-			     DISCARD_FL_BARRIER);
+			BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
 }
 
 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
diff --git a/fs/buffer.c b/fs/buffer.c
index c9c266d..08e422d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -275,6 +275,7 @@
 		return;
 
 	invalidate_bh_lrus();
+	lru_add_drain_all();	/* make sure all lru add caches are flushed */
 	invalidate_mapping_pages(mapping, 0, -1);
 }
 EXPORT_SYMBOL(invalidate_bdev);
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 3cf038c0..e8766a3 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -1332,6 +1332,12 @@
 
 		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
 		/*
+		 * skip this group (and avoid loading bitmap) if there
+		 * are no free blocks
+		 */
+		if (!free_blocks)
+			continue;
+		/*
 		 * skip this group if the number of
 		 * free blocks is less than half of the reservation
 		 * window size.
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index ad7d572..f0c5286 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -106,7 +106,7 @@
 	struct super_block * sb = inode->i_sb;
 	int is_directory;
 	unsigned long ino;
-	struct buffer_head *bitmap_bh = NULL;
+	struct buffer_head *bitmap_bh;
 	unsigned long block_group;
 	unsigned long bit;
 	struct ext2_super_block * es;
@@ -135,14 +135,13 @@
 	    ino > le32_to_cpu(es->s_inodes_count)) {
 		ext2_error (sb, "ext2_free_inode",
 			    "reserved or nonexistent inode %lu", ino);
-		goto error_return;
+		return;
 	}
 	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
 	bit = (ino - 1) % EXT2_INODES_PER_GROUP(sb);
-	brelse(bitmap_bh);
 	bitmap_bh = read_inode_bitmap(sb, block_group);
 	if (!bitmap_bh)
-		goto error_return;
+		return;
 
 	/* Ok, now we can actually update the inode bitmaps.. */
 	if (!ext2_clear_bit_atomic(sb_bgl_lock(EXT2_SB(sb), block_group),
@@ -154,7 +153,7 @@
 	mark_buffer_dirty(bitmap_bh);
 	if (sb->s_flags & MS_SYNCHRONOUS)
 		sync_dirty_buffer(bitmap_bh);
-error_return:
+
 	brelse(bitmap_bh);
 }
 
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index fc13cc1..527c46d 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -22,7 +22,6 @@
  *  Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
  */
 
-#include <linux/smp_lock.h>
 #include <linux/time.h>
 #include <linux/highuid.h>
 #include <linux/pagemap.h>
@@ -1406,11 +1405,11 @@
 			       /* If this is the first large file
 				* created, add a flag to the superblock.
 				*/
-				lock_kernel();
+				spin_lock(&EXT2_SB(sb)->s_lock);
 				ext2_update_dynamic_rev(sb);
 				EXT2_SET_RO_COMPAT_FEATURE(sb,
 					EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
-				unlock_kernel();
+				spin_unlock(&EXT2_SB(sb)->s_lock);
 				ext2_write_super(sb);
 			}
 		}
@@ -1467,7 +1466,7 @@
 	if (error)
 		return error;
 
-	if (iattr->ia_valid & ATTR_SIZE)
+	if (is_quota_modification(inode, iattr))
 		dquot_initialize(inode);
 	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
 	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 42e4a30..71e9eb1 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -26,7 +26,6 @@
 #include <linux/random.h>
 #include <linux/buffer_head.h>
 #include <linux/exportfs.h>
-#include <linux/smp_lock.h>
 #include <linux/vfs.h>
 #include <linux/seq_file.h>
 #include <linux/mount.h>
@@ -39,7 +38,7 @@
 #include "xip.h"
 
 static void ext2_sync_super(struct super_block *sb,
-			    struct ext2_super_block *es);
+			    struct ext2_super_block *es, int wait);
 static int ext2_remount (struct super_block * sb, int * flags, char * data);
 static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
 static int ext2_sync_fs(struct super_block *sb, int wait);
@@ -52,9 +51,11 @@
 	struct ext2_super_block *es = sbi->s_es;
 
 	if (!(sb->s_flags & MS_RDONLY)) {
+		spin_lock(&sbi->s_lock);
 		sbi->s_mount_state |= EXT2_ERROR_FS;
 		es->s_state |= cpu_to_le16(EXT2_ERROR_FS);
-		ext2_sync_super(sb, es);
+		spin_unlock(&sbi->s_lock);
+		ext2_sync_super(sb, es, 1);
 	}
 
 	va_start(args, fmt);
@@ -84,6 +85,9 @@
 	va_end(args);
 }
 
+/*
+ * This must be called with sbi->s_lock held.
+ */
 void ext2_update_dynamic_rev(struct super_block *sb)
 {
 	struct ext2_super_block *es = EXT2_SB(sb)->s_es;
@@ -115,8 +119,6 @@
 	int i;
 	struct ext2_sb_info *sbi = EXT2_SB(sb);
 
-	lock_kernel();
-
 	if (sb->s_dirt)
 		ext2_write_super(sb);
 
@@ -124,8 +126,10 @@
 	if (!(sb->s_flags & MS_RDONLY)) {
 		struct ext2_super_block *es = sbi->s_es;
 
+		spin_lock(&sbi->s_lock);
 		es->s_state = cpu_to_le16(sbi->s_mount_state);
-		ext2_sync_super(sb, es);
+		spin_unlock(&sbi->s_lock);
+		ext2_sync_super(sb, es, 1);
 	}
 	db_count = sbi->s_gdb_count;
 	for (i = 0; i < db_count; i++)
@@ -140,8 +144,6 @@
 	sb->s_fs_info = NULL;
 	kfree(sbi->s_blockgroup_lock);
 	kfree(sbi);
-
-	unlock_kernel();
 }
 
 static struct kmem_cache * ext2_inode_cachep;
@@ -209,6 +211,7 @@
 	struct ext2_super_block *es = sbi->s_es;
 	unsigned long def_mount_opts;
 
+	spin_lock(&sbi->s_lock);
 	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
 
 	if (sbi->s_sb_block != 1)
@@ -281,6 +284,7 @@
 	if (!test_opt(sb, RESERVATION))
 		seq_puts(seq, ",noreservation");
 
+	spin_unlock(&sbi->s_lock);
 	return 0;
 }
 
@@ -606,7 +610,6 @@
 	if (!le16_to_cpu(es->s_max_mnt_count))
 		es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
 	le16_add_cpu(&es->s_mnt_count, 1);
-	ext2_write_super(sb);
 	if (test_opt (sb, DEBUG))
 		ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
 			"bpg=%lu, ipg=%lu, mo=%04lx]",
@@ -767,6 +770,8 @@
 	sb->s_fs_info = sbi;
 	sbi->s_sb_block = sb_block;
 
+	spin_lock_init(&sbi->s_lock);
+
 	/*
 	 * See what the current blocksize for the device is, and
 	 * use that as the blocksize.  Otherwise (or if the blocksize
@@ -1079,7 +1084,9 @@
 	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
 		ext2_msg(sb, KERN_WARNING,
 			"warning: mounting ext3 filesystem as ext2");
-	ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
+	if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
+		sb->s_flags |= MS_RDONLY;
+	ext2_write_super(sb);
 	return 0;
 
 cantfind_ext2:
@@ -1120,30 +1127,26 @@
 		 * be remapped.  Nothing we can do but to retry the
 		 * write and hope for the best.
 		 */
-		printk(KERN_ERR "EXT2-fs: %s previous I/O error to "
-		       "superblock detected", sb->s_id);
+		ext2_msg(sb, KERN_ERR,
+		       "previous I/O error to superblock detected\n");
 		clear_buffer_write_io_error(sbh);
 		set_buffer_uptodate(sbh);
 	}
 }
 
-static void ext2_commit_super (struct super_block * sb,
-			       struct ext2_super_block * es)
+static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
+			    int wait)
 {
 	ext2_clear_super_error(sb);
-	es->s_wtime = cpu_to_le32(get_seconds());
-	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
-	sb->s_dirt = 0;
-}
-
-static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
-{
-	ext2_clear_super_error(sb);
+	spin_lock(&EXT2_SB(sb)->s_lock);
 	es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
 	es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
 	es->s_wtime = cpu_to_le32(get_seconds());
+	/* unlock before we do IO */
+	spin_unlock(&EXT2_SB(sb)->s_lock);
 	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
-	sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
+	if (wait)
+		sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
 	sb->s_dirt = 0;
 }
 
@@ -1157,43 +1160,18 @@
  * may have been checked while mounted and e2fsck may have
  * set s_state to EXT2_VALID_FS after some corrections.
  */
-
 static int ext2_sync_fs(struct super_block *sb, int wait)
 {
+	struct ext2_sb_info *sbi = EXT2_SB(sb);
 	struct ext2_super_block *es = EXT2_SB(sb)->s_es;
-	struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;
 
-	lock_kernel();
-	if (buffer_write_io_error(sbh)) {
-		/*
-		 * Oh, dear.  A previous attempt to write the
-		 * superblock failed.  This could happen because the
-		 * USB device was yanked out.  Or it could happen to
-		 * be a transient write error and maybe the block will
-		 * be remapped.  Nothing we can do but to retry the
-		 * write and hope for the best.
-		 */
-		ext2_msg(sb, KERN_ERR,
-		       "previous I/O error to superblock detected\n");
-		clear_buffer_write_io_error(sbh);
-		set_buffer_uptodate(sbh);
-	}
-
+	spin_lock(&sbi->s_lock);
 	if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
 		ext2_debug("setting valid to 0\n");
 		es->s_state &= cpu_to_le16(~EXT2_VALID_FS);
-		es->s_free_blocks_count =
-			cpu_to_le32(ext2_count_free_blocks(sb));
-		es->s_free_inodes_count =
-			cpu_to_le32(ext2_count_free_inodes(sb));
-		es->s_mtime = cpu_to_le32(get_seconds());
-		ext2_sync_super(sb, es);
-	} else {
-		ext2_commit_super(sb, es);
 	}
-	sb->s_dirt = 0;
-	unlock_kernel();
-
+	spin_unlock(&sbi->s_lock);
+	ext2_sync_super(sb, es, wait);
 	return 0;
 }
 
@@ -1215,7 +1193,7 @@
 	unsigned long old_sb_flags;
 	int err;
 
-	lock_kernel();
+	spin_lock(&sbi->s_lock);
 
 	/* Store the old options */
 	old_sb_flags = sb->s_flags;
@@ -1254,13 +1232,13 @@
 		sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
 	}
 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
-		unlock_kernel();
+		spin_unlock(&sbi->s_lock);
 		return 0;
 	}
 	if (*flags & MS_RDONLY) {
 		if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
 		    !(sbi->s_mount_state & EXT2_VALID_FS)) {
-			unlock_kernel();
+			spin_unlock(&sbi->s_lock);
 			return 0;
 		}
 		/*
@@ -1269,6 +1247,8 @@
 		 */
 		es->s_state = cpu_to_le16(sbi->s_mount_state);
 		es->s_mtime = cpu_to_le32(get_seconds());
+		spin_unlock(&sbi->s_lock);
+		ext2_sync_super(sb, es, 1);
 	} else {
 		__le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
 					       ~EXT2_FEATURE_RO_COMPAT_SUPP);
@@ -1288,16 +1268,16 @@
 		sbi->s_mount_state = le16_to_cpu(es->s_state);
 		if (!ext2_setup_super (sb, es, 0))
 			sb->s_flags &= ~MS_RDONLY;
+		spin_unlock(&sbi->s_lock);
+		ext2_write_super(sb);
 	}
-	ext2_sync_super(sb, es);
-	unlock_kernel();
 	return 0;
 restore_opts:
 	sbi->s_mount_opt = old_opts.s_mount_opt;
 	sbi->s_resuid = old_opts.s_resuid;
 	sbi->s_resgid = old_opts.s_resgid;
 	sb->s_flags = old_sb_flags;
-	unlock_kernel();
+	spin_unlock(&sbi->s_lock);
 	return err;
 }
 
@@ -1308,6 +1288,8 @@
 	struct ext2_super_block *es = sbi->s_es;
 	u64 fsid;
 
+	spin_lock(&sbi->s_lock);
+
 	if (test_opt (sb, MINIX_DF))
 		sbi->s_overhead_last = 0;
 	else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
@@ -1362,6 +1344,7 @@
 	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
 	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
 	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
+	spin_unlock(&sbi->s_lock);
 	return 0;
 }
 
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index e44dc92..3b96045 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -345,7 +345,9 @@
 	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
 		return;
 
+	spin_lock(&EXT2_SB(sb)->s_lock);
 	EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
+	spin_unlock(&EXT2_SB(sb)->s_lock);
 	sb->s_dirt = 1;
 	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
 }
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index a177122..4a32511 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -1584,6 +1584,12 @@
 			goto io_error;
 		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
 		/*
+		 * skip this group (and avoid loading bitmap) if there
+		 * are no free blocks
+		 */
+		if (!free_blocks)
+			continue;
+		/*
 		 * skip this group if the number of
 		 * free blocks is less than half of the reservation
 		 * window size.
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index 8209f26..fcf7487 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -48,7 +48,7 @@
 	struct inode *inode = dentry->d_inode;
 	struct ext3_inode_info *ei = EXT3_I(inode);
 	journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
-	int ret = 0;
+	int ret, needs_barrier = 0;
 	tid_t commit_tid;
 
 	if (inode->i_sb->s_flags & MS_RDONLY)
@@ -70,28 +70,27 @@
 	 *  (they were dirtied by commit).  But that's OK - the blocks are
 	 *  safe in-journal, which is all fsync() needs to ensure.
 	 */
-	if (ext3_should_journal_data(inode)) {
-		ret = ext3_force_commit(inode->i_sb);
-		goto out;
-	}
+	if (ext3_should_journal_data(inode))
+		return ext3_force_commit(inode->i_sb);
 
 	if (datasync)
 		commit_tid = atomic_read(&ei->i_datasync_tid);
 	else
 		commit_tid = atomic_read(&ei->i_sync_tid);
 
-	if (log_start_commit(journal, commit_tid)) {
-		log_wait_commit(journal, commit_tid);
-		goto out;
-	}
+	if (test_opt(inode->i_sb, BARRIER) &&
+	    !journal_trans_will_send_data_barrier(journal, commit_tid))
+		needs_barrier = 1;
+	log_start_commit(journal, commit_tid);
+	ret = log_wait_commit(journal, commit_tid);
 
 	/*
 	 * In case we didn't commit a transaction, we have to flush
 	 * disk caches manually so that data really is on persistent
 	 * storage
 	 */
-	if (test_opt(inode->i_sb, BARRIER))
-		blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
-out:
+	if (needs_barrier)
+		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
+				BLKDEV_IFL_WAIT);
 	return ret;
 }
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index ea33bdf..735f019 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3151,7 +3151,7 @@
 	if (error)
 		return error;
 
-	if (ia_valid & ATTR_SIZE)
+	if (is_quota_modification(inode, attr))
 		dquot_initialize(inode);
 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
 		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 1bee604..0fc1293 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -653,8 +653,12 @@
 		seq_printf(seq, ",commit=%u",
 			   (unsigned) (sbi->s_commit_interval / HZ));
 	}
-	if (test_opt(sb, BARRIER))
-		seq_puts(seq, ",barrier=1");
+
+	/*
+	 * Always display barrier state so it's clear what the status is.
+	 */
+	seq_puts(seq, ",barrier=");
+	seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
 	if (test_opt(sb, NOBH))
 		seq_puts(seq, ",nobh");
 
@@ -810,8 +814,8 @@
 	Opt_data_err_abort, Opt_data_err_ignore,
 	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
-	Opt_noquota, Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
-	Opt_usrquota, Opt_grpquota
+	Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
+	Opt_resize, Opt_usrquota, Opt_grpquota
 };
 
 static const match_table_t tokens = {
@@ -865,6 +869,8 @@
 	{Opt_quota, "quota"},
 	{Opt_usrquota, "usrquota"},
 	{Opt_barrier, "barrier=%u"},
+	{Opt_barrier, "barrier"},
+	{Opt_nobarrier, "nobarrier"},
 	{Opt_resize, "resize"},
 	{Opt_err, NULL},
 };
@@ -967,7 +973,11 @@
 		int token;
 		if (!*p)
 			continue;
-
+		/*
+		 * Initialize args struct so we know whether arg was
+		 * found; some options take optional arguments.
+		 */
+		args[0].to = args[0].from = 0;
 		token = match_token(p, tokens, args);
 		switch (token) {
 		case Opt_bsd_df:
@@ -1215,9 +1225,15 @@
 		case Opt_abort:
 			set_opt(sbi->s_mount_opt, ABORT);
 			break;
+		case Opt_nobarrier:
+			clear_opt(sbi->s_mount_opt, BARRIER);
+			break;
 		case Opt_barrier:
-			if (match_int(&args[0], &option))
-				return 0;
+			if (args[0].from) {
+				if (match_int(&args[0], &option))
+					return 0;
+			} else
+				option = 1;	/* No argument, default to 1 */
 			if (option)
 				set_opt(sbi->s_mount_opt, BARRIER);
 			else
@@ -1890,21 +1906,6 @@
 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
 	spin_lock_init(&sbi->s_next_gen_lock);
 
-	err = percpu_counter_init(&sbi->s_freeblocks_counter,
-			ext3_count_free_blocks(sb));
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_freeinodes_counter,
-				ext3_count_free_inodes(sb));
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_dirs_counter,
-				ext3_count_dirs(sb));
-	}
-	if (err) {
-		ext3_msg(sb, KERN_ERR, "error: insufficient memory");
-		goto failed_mount3;
-	}
-
 	/* per filesystem reservation list head & lock */
 	spin_lock_init(&sbi->s_rsv_window_lock);
 	sbi->s_rsv_window_root = RB_ROOT;
@@ -1945,15 +1946,29 @@
 	if (!test_opt(sb, NOLOAD) &&
 	    EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
 		if (ext3_load_journal(sb, es, journal_devnum))
-			goto failed_mount3;
+			goto failed_mount2;
 	} else if (journal_inum) {
 		if (ext3_create_journal(sb, es, journal_inum))
-			goto failed_mount3;
+			goto failed_mount2;
 	} else {
 		if (!silent)
 			ext3_msg(sb, KERN_ERR,
 				"error: no journal found. "
 				"mounting ext3 over ext2?");
+		goto failed_mount2;
+	}
+	err = percpu_counter_init(&sbi->s_freeblocks_counter,
+			ext3_count_free_blocks(sb));
+	if (!err) {
+		err = percpu_counter_init(&sbi->s_freeinodes_counter,
+				ext3_count_free_inodes(sb));
+	}
+	if (!err) {
+		err = percpu_counter_init(&sbi->s_dirs_counter,
+				ext3_count_dirs(sb));
+	}
+	if (err) {
+		ext3_msg(sb, KERN_ERR, "error: insufficient memory");
 		goto failed_mount3;
 	}
 
@@ -1978,7 +1993,7 @@
 			ext3_msg(sb, KERN_ERR,
 				"error: journal does not support "
 				"requested data journaling mode");
-			goto failed_mount4;
+			goto failed_mount3;
 		}
 	default:
 		break;
@@ -2001,19 +2016,19 @@
 	if (IS_ERR(root)) {
 		ext3_msg(sb, KERN_ERR, "error: get root inode failed");
 		ret = PTR_ERR(root);
-		goto failed_mount4;
+		goto failed_mount3;
 	}
 	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
 		iput(root);
 		ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
-		goto failed_mount4;
+		goto failed_mount3;
 	}
 	sb->s_root = d_alloc_root(root);
 	if (!sb->s_root) {
 		ext3_msg(sb, KERN_ERR, "error: get root dentry failed");
 		iput(root);
 		ret = -ENOMEM;
-		goto failed_mount4;
+		goto failed_mount3;
 	}
 
 	ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
@@ -2039,12 +2054,11 @@
 		       sb->s_id);
 	goto failed_mount;
 
-failed_mount4:
-	journal_destroy(sbi->s_journal);
 failed_mount3:
 	percpu_counter_destroy(&sbi->s_freeblocks_counter);
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
 	percpu_counter_destroy(&sbi->s_dirs_counter);
+	journal_destroy(sbi->s_journal);
 failed_mount2:
 	for (i = 0; i < db_count; i++)
 		brelse(sbi->s_group_desc[i]);
@@ -2278,6 +2292,9 @@
 			return -EINVAL;
 	}
 
+	if (!(journal->j_flags & JFS_BARRIER))
+		printk(KERN_INFO "EXT3-fs: barriers not enabled\n");
+
 	if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
 		err = journal_update_format(journal);
 		if (err)  {
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 0d0c323..ef3d980 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -100,9 +100,11 @@
 		if (ext4_should_writeback_data(inode) &&
 		    (journal->j_fs_dev != journal->j_dev) &&
 		    (journal->j_flags & JBD2_BARRIER))
-			blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
+			blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
+					NULL, BLKDEV_IFL_WAIT);
 		jbd2_log_wait_commit(journal, commit_tid);
 	} else if (journal->j_flags & JBD2_BARRIER)
-		blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
+		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
+			BLKDEV_IFL_WAIT);
 	return ret;
 }
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 81d6054..3e0f6af 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5425,7 +5425,7 @@
 	if (error)
 		return error;
 
-	if (ia_valid & ATTR_SIZE)
+	if (is_quota_modification(inode, attr))
 		dquot_initialize(inode);
 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
 		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 0a14074..f74d270 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -14,6 +14,7 @@
 #include <linux/dnotify.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/pipe_fs_i.h>
 #include <linux/security.h>
 #include <linux/ptrace.h>
 #include <linux/signal.h>
@@ -412,6 +413,10 @@
 	case F_NOTIFY:
 		err = fcntl_dirnotify(fd, filp, arg);
 		break;
+	case F_SETPIPE_SZ:
+	case F_GETPIPE_SZ:
+		err = pipe_fcntl(filp, cmd, arg);
+		break;
 	default:
 		break;
 	}
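
The new F_SETPIPE_SZ/F_GETPIPE_SZ commands are routed to pipe_fcntl() and let user space resize a pipe's buffer. A hedged userspace sketch (the requested size is a hint; the kernel may round it up or reject it):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fds[2];
		long size;

		if (pipe(fds))
			return 1;
		if (fcntl(fds[1], F_SETPIPE_SZ, 1 << 20) < 0)	/* ask for 1 MiB */
			perror("F_SETPIPE_SZ");
		size = fcntl(fds[1], F_GETPIPE_SZ);	/* read back the actual size */
		printf("pipe buffer: %ld bytes\n", size);
		return 0;
	}
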
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 4b37f7c..437a743 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -45,6 +45,7 @@
 	int for_kupdate:1;
 	int range_cyclic:1;
 	int for_background:1;
+	int sb_pinned:1;
 };
 
 /*
@@ -192,7 +193,8 @@
 }
 
 static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
-				 struct wb_writeback_args *args)
+				 struct wb_writeback_args *args,
+				 int wait)
 {
 	struct bdi_work *work;
 
@@ -204,6 +206,8 @@
 	if (work) {
 		bdi_work_init(work, args);
 		bdi_queue_work(bdi, work);
+		if (wait)
+			bdi_wait_on_work_clear(work);
 	} else {
 		struct bdi_writeback *wb = &bdi->wb;
 
@@ -230,6 +234,11 @@
 		.sync_mode	= WB_SYNC_ALL,
 		.nr_pages	= LONG_MAX,
 		.range_cyclic	= 0,
+		/*
+		 * Setting sb_pinned is not necessary for WB_SYNC_ALL, but
+		 * let's make it explicitly clear.
+		 */
+		.sb_pinned	= 1,
 	};
 	struct bdi_work work;
 
@@ -245,21 +254,23 @@
  * @bdi: the backing device to write from
  * @sb: write inodes from this super_block
  * @nr_pages: the number of pages to write
+ * @sb_locked: caller already holds sb umount sem.
  *
  * Description:
  *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
- *   completion. Caller need not hold sb s_umount semaphore.
+ *   completion. Caller specifies whether sb umount sem is held already or not.
  *
  */
 void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
-			 long nr_pages)
+			 long nr_pages, int sb_locked)
 {
 	struct wb_writeback_args args = {
 		.sb		= sb,
 		.sync_mode	= WB_SYNC_NONE,
 		.nr_pages	= nr_pages,
 		.range_cyclic	= 1,
+		.sb_pinned	= sb_locked,
 	};
 
 	/*
@@ -271,7 +282,7 @@
 		args.for_background = 1;
 	}
 
-	bdi_alloc_queue_work(bdi, &args);
+	bdi_alloc_queue_work(bdi, &args, sb_locked);
 }
 
 /*
@@ -452,11 +463,9 @@
 
 	BUG_ON(inode->i_state & I_SYNC);
 
-	/* Set I_SYNC, reset I_DIRTY */
-	dirty = inode->i_state & I_DIRTY;
+	/* Set I_SYNC, reset I_DIRTY_PAGES */
 	inode->i_state |= I_SYNC;
-	inode->i_state &= ~I_DIRTY;
-
+	inode->i_state &= ~I_DIRTY_PAGES;
 	spin_unlock(&inode_lock);
 
 	ret = do_writepages(mapping, wbc);
@@ -472,6 +481,15 @@
 			ret = err;
 	}
 
+	/*
+	 * Some filesystems may redirty the inode during the writeback
+	 * due to delalloc, clear dirty metadata flags right before
+	 * write_inode()
+	 */
+	spin_lock(&inode_lock);
+	dirty = inode->i_state & I_DIRTY;
+	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
+	spin_unlock(&inode_lock);
 	/* Don't write the inode if only I_DIRTY_PAGES was set */
 	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
 		int err = write_inode(inode, wbc);
@@ -577,7 +595,7 @@
 	/*
 	 * Caller must already hold the ref for this
 	 */
-	if (wbc->sync_mode == WB_SYNC_ALL) {
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->sb_pinned) {
 		WARN_ON(!rwsem_is_locked(&sb->s_umount));
 		return SB_NOT_PINNED;
 	}
@@ -751,6 +769,7 @@
 		.for_kupdate		= args->for_kupdate,
 		.for_background		= args->for_background,
 		.range_cyclic		= args->range_cyclic,
+		.sb_pinned		= args->sb_pinned,
 	};
 	unsigned long oldest_jif;
 	long wrote = 0;
@@ -852,6 +871,12 @@
 	unsigned long expired;
 	long nr_pages;
 
+	/*
+	 * When set to zero, disable periodic writeback
+	 */
+	if (!dirty_writeback_interval)
+		return 0;
+
 	expired = wb->last_old_flush +
 			msecs_to_jiffies(dirty_writeback_interval * 10);
 	if (time_before(jiffies, expired))
@@ -887,6 +912,7 @@
 
 	while ((work = get_next_work_item(bdi, wb)) != NULL) {
 		struct wb_writeback_args args = work->args;
+		int post_clear;
 
 		/*
 		 * Override sync mode, in case we must wait for completion
@@ -894,11 +920,13 @@
 		if (force_wait)
 			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;
 
+		post_clear = args.sync_mode == WB_SYNC_ALL || args.sb_pinned;
+
 		/*
 		 * If this isn't a data integrity operation, just notify
 		 * that we have seen this work and we are now starting it.
 		 */
-		if (args.sync_mode == WB_SYNC_NONE)
+		if (!post_clear)
 			wb_clear_pending(wb, work);
 
 		wrote += wb_writeback(wb, &args);
@@ -907,7 +935,7 @@
 		 * This is a data integrity writeback, so only do the
 		 * notification when we have completed the work.
 		 */
-		if (args.sync_mode == WB_SYNC_ALL)
+		if (post_clear)
 			wb_clear_pending(wb, work);
 	}
 
@@ -947,8 +975,17 @@
 				break;
 		}
 
-		wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
-		schedule_timeout_interruptible(wait_jiffies);
+		if (dirty_writeback_interval) {
+			wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
+			schedule_timeout_interruptible(wait_jiffies);
+		} else {
+			set_current_state(TASK_INTERRUPTIBLE);
+			if (list_empty_careful(&wb->bdi->work_list) &&
+			    !kthread_should_stop())
+				schedule();
+			__set_current_state(TASK_RUNNING);
+		}
+
 		try_to_freeze();
 	}
 
@@ -974,7 +1011,7 @@
 		if (!bdi_has_dirty_io(bdi))
 			continue;
 
-		bdi_alloc_queue_work(bdi, &args);
+		bdi_alloc_queue_work(bdi, &args, 0);
 	}
 
 	rcu_read_unlock();
@@ -1183,6 +1220,18 @@
 	iput(old_inode);
 }
 
+static void __writeback_inodes_sb(struct super_block *sb, int sb_locked)
+{
+	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
+	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
+	long nr_to_write;
+
+	nr_to_write = nr_dirty + nr_unstable +
+			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
+
+	bdi_start_writeback(sb->s_bdi, sb, nr_to_write, sb_locked);
+}
+
 /**
  * writeback_inodes_sb	-	writeback dirty inodes from given super_block
  * @sb: the superblock
@@ -1194,18 +1243,23 @@
  */
 void writeback_inodes_sb(struct super_block *sb)
 {
-	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
-	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
-	long nr_to_write;
-
-	nr_to_write = nr_dirty + nr_unstable +
-			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
-
-	bdi_start_writeback(sb->s_bdi, sb, nr_to_write);
+	__writeback_inodes_sb(sb, 0);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
 /**
+ * writeback_inodes_sb_locked	- writeback dirty inodes from given super_block
+ * @sb: the superblock
+ *
+ * Like writeback_inodes_sb(), except the caller already holds the
+ * sb umount sem.
+ */
+void writeback_inodes_sb_locked(struct super_block *sb)
+{
+	__writeback_inodes_sb(sb, 1);
+}
+
+/**
  * writeback_inodes_sb_if_idle	-	start writeback if none underway
  * @sb: the superblock
  *
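
writeback_inodes_sb_locked() exists for callers that already hold the superblock's s_umount semaphore and therefore must not have the flusher try to pin it again. A hedged sketch of the intended usage (the surrounding locking is illustrative, not taken from the patch):

	/* we already hold s_umount, e.g. deep in a sync or quota path,
	 * so use the _locked variant to kick WB_SYNC_NONE writeback */
	down_read(&sb->s_umount);
	writeback_inodes_sb_locked(sb);
	up_read(&sb->s_umount);
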
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index d5f4661..49667d6 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1476,8 +1476,8 @@
 	return 0;
 }
 
-static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id,
-			   struct fs_disk_quota *fdq)
+static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
+			  struct fs_disk_quota *fdq)
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_quota_lvb *qlvb;
@@ -1521,8 +1521,8 @@
 /* GFS2 only supports a subset of the XFS fields */
 #define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)
 
-static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id,
-			   struct fs_disk_quota *fdq)
+static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
+			  struct fs_disk_quota *fdq)
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -1629,7 +1629,7 @@
 const struct quotactl_ops gfs2_quotactl_ops = {
 	.quota_sync     = gfs2_quota_sync,
 	.get_xstate     = gfs2_quota_get_xstate,
-	.get_xquota	= gfs2_xquota_get,
-	.set_xquota	= gfs2_xquota_set,
+	.get_dqblk	= gfs2_get_dqblk,
+	.set_dqblk	= gfs2_set_dqblk,
 };
 
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 8bce73e..117fa41 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -854,7 +854,8 @@
 				if ((start + nr_sects) != blk) {
 					rv = blkdev_issue_discard(bdev, start,
 							    nr_sects, GFP_NOFS,
-							    DISCARD_FL_BARRIER);
+							    BLKDEV_IFL_WAIT |
+							    BLKDEV_IFL_BARRIER);
 					if (rv)
 						goto fail;
 					nr_sects = 0;
@@ -869,7 +870,7 @@
 	}
 	if (nr_sects) {
 		rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS,
-					 DISCARD_FL_BARRIER);
+					 BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
 		if (rv)
 			goto fail;
 	}
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index ecb44c9..28a9dda 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -786,6 +786,12 @@
 
 	jbd_debug(3, "JBD: commit phase 6\n");
 
+	/* All metadata is written, now write commit record and do cleanup */
+	spin_lock(&journal->j_state_lock);
+	J_ASSERT(commit_transaction->t_state == T_COMMIT);
+	commit_transaction->t_state = T_COMMIT_RECORD;
+	spin_unlock(&journal->j_state_lock);
+
 	if (journal_write_commit_record(journal, commit_transaction))
 		err = -EIO;
 
@@ -923,7 +929,7 @@
 
 	jbd_debug(3, "JBD: commit phase 8\n");
 
-	J_ASSERT(commit_transaction->t_state == T_COMMIT);
+	J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD);
 
 	commit_transaction->t_state = T_FINISHED;
 	J_ASSERT(commit_transaction == journal->j_committing_transaction);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index bd224ee..93d1e47 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -565,6 +565,38 @@
 }
 
 /*
+ * Return 1 if a given transaction has not yet sent a barrier request
+ * connected with a transaction commit. If 0 is returned, transaction
+ * may or may not have sent the barrier. Used to avoid sending barrier
+ * twice in common cases.
+ */
+int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
+{
+	int ret = 0;
+	transaction_t *commit_trans;
+
+	if (!(journal->j_flags & JFS_BARRIER))
+		return 0;
+	spin_lock(&journal->j_state_lock);
+	/* Transaction already committed? */
+	if (tid_geq(journal->j_commit_sequence, tid))
+		goto out;
+	/*
+	 * Transaction is being committed and we already proceeded to
+	 * writing commit record?
+	 */
+	commit_trans = journal->j_committing_transaction;
+	if (commit_trans && commit_trans->t_tid == tid &&
+	    commit_trans->t_state >= T_COMMIT_RECORD)
+		goto out;
+	ret = 1;
+out:
+	spin_unlock(&journal->j_state_lock);
+	return ret;
+}
+EXPORT_SYMBOL(journal_trans_will_send_data_barrier);
+
+/*
  * Log buffer allocation routines:
  */
 
@@ -1157,6 +1189,7 @@
 {
 	int err = 0;
 
+	
 	/* Wait for the commit thread to wake up and die. */
 	journal_kill_thread(journal);
 
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 30beb11..076d1cc 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -530,7 +530,8 @@
 	 */
 	if ((journal->j_fs_dev != journal->j_dev) &&
 	    (journal->j_flags & JBD2_BARRIER))
-		blkdev_issue_flush(journal->j_fs_dev, NULL);
+		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL,
+			BLKDEV_IFL_WAIT);
 	if (!(journal->j_flags & JBD2_ABORT))
 		jbd2_journal_update_superblock(journal, 1);
 	return 0;
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 671da7f..75716d3 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -717,7 +717,8 @@
 	if (commit_transaction->t_flushed_data_blocks &&
 	    (journal->j_fs_dev != journal->j_dev) &&
 	    (journal->j_flags & JBD2_BARRIER))
-		blkdev_issue_flush(journal->j_fs_dev, NULL);
+		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL,
+			BLKDEV_IFL_WAIT);
 
 	/* Done it all: now write the commit record asynchronously. */
 	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
@@ -727,7 +728,8 @@
 		if (err)
 			__jbd2_journal_abort_hard(journal);
 		if (journal->j_flags & JBD2_BARRIER)
-			blkdev_issue_flush(journal->j_dev, NULL);
+			blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL,
+				BLKDEV_IFL_WAIT);
 	}
 
 	err = journal_finish_inode_data_buffers(journal, commit_transaction);
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 14ba982..85d9ec6 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -98,7 +98,7 @@
 	if (rc)
 		return rc;
 
-	if (iattr->ia_valid & ATTR_SIZE)
+	if (is_quota_modification(inode, iattr))
 		dquot_initialize(inode);
 	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
 	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index a756168..8c10973 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -674,7 +674,7 @@
 						   start * sects_per_block,
 						   nblocks * sects_per_block,
 						   GFP_NOFS,
-						   DISCARD_FL_BARRIER);
+						   BLKDEV_IFL_BARRIER);
 			if (ret < 0)
 				return ret;
 			nblocks = 0;
@@ -684,7 +684,7 @@
 		ret = blkdev_issue_discard(nilfs->ns_bdev,
 					   start * sects_per_block,
 					   nblocks * sects_per_block,
-					   GFP_NOFS, DISCARD_FL_BARRIER);
+					   GFP_NOFS, BLKDEV_IFL_BARRIER);
 	return ret;
 }
 
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 50c4ee8..39eb16a 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3897,7 +3897,8 @@
 		oinfo->dqi_gi.dqi_free_entry =
 					be32_to_cpu(lvb->lvb_free_entry);
 	} else {
-		status = ocfs2_read_quota_block(oinfo->dqi_gqinode, 0, &bh);
+		status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
+						     oinfo->dqi_giblk, &bh);
 		if (status) {
 			mlog_errno(status);
 			goto bail;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index f74f140..97e54b9 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -933,9 +933,8 @@
 	struct ocfs2_super *osb = OCFS2_SB(sb);
 	struct buffer_head *bh = NULL;
 	handle_t *handle = NULL;
-	int qtype;
-	struct dquot *transfer_from[MAXQUOTAS] = { };
 	struct dquot *transfer_to[MAXQUOTAS] = { };
+	int qtype;
 
 	mlog_entry("(0x%p, '%.*s')\n", dentry,
 	           dentry->d_name.len, dentry->d_name.name);
@@ -966,10 +965,10 @@
 	if (status)
 		return status;
 
+	if (is_quota_modification(inode, attr))
+		dquot_initialize(inode);
 	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
 	if (size_change) {
-		dquot_initialize(inode);
-
 		status = ocfs2_rw_lock(inode, 1);
 		if (status < 0) {
 			mlog_errno(status);
@@ -1019,9 +1018,7 @@
 		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
 			transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid,
 						      USRQUOTA);
-			transfer_from[USRQUOTA] = dqget(sb, inode->i_uid,
-							USRQUOTA);
-			if (!transfer_to[USRQUOTA] || !transfer_from[USRQUOTA]) {
+			if (!transfer_to[USRQUOTA]) {
 				status = -ESRCH;
 				goto bail_unlock;
 			}
@@ -1031,9 +1028,7 @@
 		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
 			transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid,
 						      GRPQUOTA);
-			transfer_from[GRPQUOTA] = dqget(sb, inode->i_gid,
-							GRPQUOTA);
-			if (!transfer_to[GRPQUOTA] || !transfer_from[GRPQUOTA]) {
+			if (!transfer_to[GRPQUOTA]) {
 				status = -ESRCH;
 				goto bail_unlock;
 			}
@@ -1045,7 +1040,7 @@
 			mlog_errno(status);
 			goto bail_unlock;
 		}
-		status = dquot_transfer(inode, attr);
+		status = __dquot_transfer(inode, transfer_to);
 		if (status < 0)
 			goto bail_commit;
 	} else {
@@ -1085,10 +1080,8 @@
 	brelse(bh);
 
 	/* Release quota pointers in case we acquired them */
-	for (qtype = 0; qtype < MAXQUOTAS; qtype++) {
+	for (qtype = 0; qtype < MAXQUOTAS; qtype++)
 		dqput(transfer_to[qtype]);
-		dqput(transfer_from[qtype]);
-	}
 
 	if (!status && attr->ia_valid & ATTR_MODE) {
 		status = ocfs2_acl_chmod(inode);
diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h
index 123bc52..196fcb5 100644
--- a/fs/ocfs2/quota.h
+++ b/fs/ocfs2/quota.h
@@ -23,6 +23,7 @@
 struct ocfs2_dquot {
 	struct dquot dq_dquot;	/* Generic VFS dquot */
 	loff_t dq_local_off;	/* Offset in the local quota file */
+	u64 dq_local_phys_blk;	/* Physical block carrying quota structure */
 	struct ocfs2_quota_chunk *dq_chunk;	/* Chunk dquot is in */
 	unsigned int dq_use_count;	/* Number of nodes having reference to this entry in global quota file */
 	s64 dq_origspace;	/* Last globally synced space usage */
@@ -51,8 +52,9 @@
 	struct ocfs2_lock_res dqi_gqlock;	/* Lock protecting quota information structure */
 	struct buffer_head *dqi_gqi_bh;	/* Buffer head with global quota file inode - set only if inode lock is obtained */
 	int dqi_gqi_count;		/* Number of holders of dqi_gqi_bh */
+	u64 dqi_giblk;			/* Number of block with global information header */
 	struct buffer_head *dqi_lqi_bh;	/* Buffer head with local quota file inode */
-	struct buffer_head *dqi_ibh;	/* Buffer with information header */
+	struct buffer_head *dqi_libh;	/* Buffer with local information header */
 	struct qtree_mem_dqinfo dqi_gi;	/* Info about global file */
 	struct delayed_work dqi_sync_work;	/* Work for syncing dquots */
 	struct ocfs2_quota_recovery *dqi_rec;	/* Pointer to recovery
@@ -102,8 +104,12 @@
 
 int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex);
 void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex);
-int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
-			   struct buffer_head **bh);
+int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh);
+int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
+				struct buffer_head **bh);
+int ocfs2_create_local_dquot(struct dquot *dquot);
+int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot);
+int ocfs2_local_write_dquot(struct dquot *dquot);
 
 extern const struct dquot_operations ocfs2_quota_operations;
 extern struct quota_format_type ocfs2_quota_format;
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 04ae76d..2bb35fe 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -25,8 +25,44 @@
 #include "dlmglue.h"
 #include "uptodate.h"
 #include "super.h"
+#include "buffer_head_io.h"
 #include "quota.h"
 
+/*
+ * Locking of quotas with OCFS2 is rather complex. Here are rules that
+ * should be obeyed by all the functions:
+ * - any write of quota structure (either to local or global file) is protected
+ *   by dqio_mutex or dquot->dq_lock.
+ * - any modification of global quota file holds inode cluster lock, i_mutex,
+ *   and ip_alloc_sem of the global quota file (achieved by
+ *   ocfs2_lock_global_qf). It also has to hold qinfo_lock.
+ * - an allocation of new blocks for local quota file is protected by
+ *   its ip_alloc_sem
+ *
+ * A rough sketch of locking dependencies (lf = local file, gf = global file):
+ * Normal filesystem operation:
+ *   start_trans -> dqio_mutex -> write to lf
+ * Syncing of local and global file:
+ *   ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
+ *     write to gf
+ *						       -> write to lf
+ * Acquire dquot for the first time:
+ *   dq_lock -> ocfs2_lock_global_qf -> qinfo_lock -> read from gf
+ *				     -> alloc space for gf
+ *				     -> start_trans -> qinfo_lock -> write to gf
+ *	     -> ip_alloc_sem of lf -> alloc space for lf
+ *	     -> write to lf
+ * Release last reference to dquot:
+ *   dq_lock -> ocfs2_lock_global_qf -> start_trans -> qinfo_lock -> write to gf
+ *	     -> write to lf
+ * Note that all the above operations also hold the inode cluster lock of lf.
+ * Recovery:
+ *   inode cluster lock of recovered lf
+ *     -> read bitmaps -> ip_alloc_sem of lf
+ *     -> ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
+ *        write to gf
+ */
+
 static struct workqueue_struct *ocfs2_quota_wq = NULL;
 
 static void qsync_work_fn(struct work_struct *work);
@@ -91,8 +127,7 @@
 	.is_id = ocfs2_global_is_id,
 };
 
-static int ocfs2_validate_quota_block(struct super_block *sb,
-				      struct buffer_head *bh)
+int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh)
 {
 	struct ocfs2_disk_dqtrailer *dqt =
 		ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);
@@ -110,54 +145,19 @@
 	return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
 }
 
-int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
-			   struct buffer_head **bh)
+int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
+				struct buffer_head **bhp)
 {
-	int rc = 0;
-	struct buffer_head *tmp = *bh;
+	int rc;
 
-	if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
-		ocfs2_error(inode->i_sb,
-			    "Quota file %llu is probably corrupted! Requested "
-			    "to read block %Lu but file has size only %Lu\n",
-			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
-			    (unsigned long long)v_block,
-			    (unsigned long long)i_size_read(inode));
-		return -EIO;
-	}
-	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
-				    ocfs2_validate_quota_block);
+	*bhp = NULL;
+	rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, 1, bhp, 0,
+			       ocfs2_validate_quota_block);
 	if (rc)
 		mlog_errno(rc);
-
-	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
-	if (!rc && !*bh)
-		*bh = tmp;
-
 	return rc;
 }
 
-static int ocfs2_get_quota_block(struct inode *inode, int block,
-				 struct buffer_head **bh)
-{
-	u64 pblock, pcount;
-	int err;
-
-	down_read(&OCFS2_I(inode)->ip_alloc_sem);
-	err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL);
-	up_read(&OCFS2_I(inode)->ip_alloc_sem);
-	if (err) {
-		mlog_errno(err);
-		return err;
-	}
-	*bh = sb_getblk(inode->i_sb, pblock);
-	if (!*bh) {
-		err = -EIO;
-		mlog_errno(err);
-	}
-	return err;
-}
-
 /* Read data from global quotafile - avoid pagecache and such because we cannot
  * afford acquiring the locks... We use quota cluster lock to serialize
  * operations. Caller is responsible for acquiring it. */
@@ -172,6 +172,7 @@
 	int err = 0;
 	struct buffer_head *bh;
 	size_t toread, tocopy;
+	u64 pblock = 0, pcount = 0;
 
 	if (off > i_size)
 		return 0;
@@ -180,8 +181,19 @@
 	toread = len;
 	while (toread > 0) {
 		tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
+		if (!pcount) {
+			err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock,
+							  &pcount, NULL);
+			if (err) {
+				mlog_errno(err);
+				return err;
+			}
+		} else {
+			pcount--;
+			pblock++;
+		}
 		bh = NULL;
-		err = ocfs2_read_quota_block(gqinode, blk, &bh);
+		err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
 		if (err) {
 			mlog_errno(err);
 			return err;
@@ -209,6 +221,7 @@
 	int err = 0, new = 0, ja_type;
 	struct buffer_head *bh = NULL;
 	handle_t *handle = journal_current_handle();
+	u64 pblock, pcount;
 
 	if (!handle) {
 		mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
@@ -221,12 +234,11 @@
 		len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
 	}
 
-	mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
 	if (gqinode->i_size < off + len) {
 		loff_t rounded_end =
 				ocfs2_align_bytes_to_blocks(sb, off + len);
 
-		/* Space is already allocated in ocfs2_global_read_dquot() */
+		/* Space is already allocated in ocfs2_acquire_dquot() */
 		err = ocfs2_simple_size_update(gqinode,
 					       oinfo->dqi_gqi_bh,
 					       rounded_end);
@@ -234,13 +246,20 @@
 			goto out;
 		new = 1;
 	}
+	err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock, &pcount, NULL);
+	if (err) {
+		mlog_errno(err);
+		goto out;
+	}
 	/* Not rewriting whole block? */
 	if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
 	    !new) {
-		err = ocfs2_read_quota_block(gqinode, blk, &bh);
+		err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
 		ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
 	} else {
-		err = ocfs2_get_quota_block(gqinode, blk, &bh);
+		bh = sb_getblk(sb, pblock);
+		if (!bh)
+			err = -ENOMEM;
 		ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
 	}
 	if (err) {
@@ -265,13 +284,11 @@
 	brelse(bh);
 out:
 	if (err) {
-		mutex_unlock(&gqinode->i_mutex);
 		mlog_errno(err);
 		return err;
 	}
 	gqinode->i_version++;
 	ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
-	mutex_unlock(&gqinode->i_mutex);
 	return len;
 }
 
@@ -289,11 +306,23 @@
 	else
 		WARN_ON(bh != oinfo->dqi_gqi_bh);
 	spin_unlock(&dq_data_lock);
+	if (ex) {
+		mutex_lock(&oinfo->dqi_gqinode->i_mutex);
+		down_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
+	} else {
+		down_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
+	}
 	return 0;
 }
 
 void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
 {
+	if (ex) {
+		up_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
+		mutex_unlock(&oinfo->dqi_gqinode->i_mutex);
+	} else {
+		up_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
+	}
 	ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
 	brelse(oinfo->dqi_gqi_bh);
 	spin_lock(&dq_data_lock);
@@ -311,6 +340,7 @@
 	struct ocfs2_global_disk_dqinfo dinfo;
 	struct mem_dqinfo *info = sb_dqinfo(sb, type);
 	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
+	u64 pcount;
 	int status;
 
 	mlog_entry_void();
@@ -337,9 +367,19 @@
 		mlog_errno(status);
 		goto out_err;
 	}
+
+	status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
+					     &pcount, NULL);
+	if (status < 0)
+		goto out_unlock;
+
+	status = ocfs2_qinfo_lock(oinfo, 0);
+	if (status < 0)
+		goto out_unlock;
 	status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
 				      sizeof(struct ocfs2_global_disk_dqinfo),
 				      OCFS2_GLOBAL_INFO_OFF);
+	ocfs2_qinfo_unlock(oinfo, 0);
 	ocfs2_unlock_global_qf(oinfo, 0);
 	if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
 		mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
@@ -366,6 +406,10 @@
 out_err:
 	mlog_exit(status);
 	return status;
+out_unlock:
+	ocfs2_unlock_global_qf(oinfo, 0);
+	mlog_errno(status);
+	goto out_err;
 }
 
 /* Write information to global quota file. Expects exclusive lock on quota
@@ -424,78 +468,10 @@
 
 static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
 {
-	/* We modify all the allocated blocks, tree root, and info block */
+	/* We modify all the allocated blocks, tree root, info block and
+	 * the inode */
 	return (ocfs2_global_qinit_alloc(sb, type) + 2) *
-			OCFS2_QUOTA_BLOCK_UPDATE_CREDITS;
-}
-
-/* Read in information from global quota file and acquire a reference to it.
- * dquot_acquire() has already started the transaction and locked quota file */
-int ocfs2_global_read_dquot(struct dquot *dquot)
-{
-	int err, err2, ex = 0;
-	struct super_block *sb = dquot->dq_sb;
-	int type = dquot->dq_type;
-	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
-	struct ocfs2_super *osb = OCFS2_SB(sb);
-	struct inode *gqinode = info->dqi_gqinode;
-	int need_alloc = ocfs2_global_qinit_alloc(sb, type);
-	handle_t *handle = NULL;
-
-	err = ocfs2_qinfo_lock(info, 0);
-	if (err < 0)
-		goto out;
-	err = qtree_read_dquot(&info->dqi_gi, dquot);
-	if (err < 0)
-		goto out_qlock;
-	OCFS2_DQUOT(dquot)->dq_use_count++;
-	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
-	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
-	ocfs2_qinfo_unlock(info, 0);
-
-	if (!dquot->dq_off) {	/* No real quota entry? */
-		ex = 1;
-		/*
-		 * Add blocks to quota file before we start a transaction since
-		 * locking allocators ranks above a transaction start
-		 */
-		WARN_ON(journal_current_handle());
-		down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
-		err = ocfs2_extend_no_holes(gqinode,
-			gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
-			gqinode->i_size);
-		up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
-		if (err < 0)
-			goto out;
-	}
-
-	handle = ocfs2_start_trans(osb,
-				   ocfs2_calc_global_qinit_credits(sb, type));
-	if (IS_ERR(handle)) {
-		err = PTR_ERR(handle);
-		goto out;
-	}
-	err = ocfs2_qinfo_lock(info, ex);
-	if (err < 0)
-		goto out_trans;
-	err = qtree_write_dquot(&info->dqi_gi, dquot);
-	if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
-		err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
-		if (!err)
-			err = err2;
-	}
-out_qlock:
-	if (ex)
-		ocfs2_qinfo_unlock(info, 1);
-	else
-		ocfs2_qinfo_unlock(info, 0);
-out_trans:
-	if (handle)
-		ocfs2_commit_trans(osb, handle);
-out:
-	if (err < 0)
-		mlog_errno(err);
-	return err;
+			OCFS2_QUOTA_BLOCK_UPDATE_CREDITS + 1;
 }
 
 /* Sync local information about quota modifications with global quota file.
@@ -636,14 +612,13 @@
 	}
 	mutex_lock(&sb_dqopt(sb)->dqio_mutex);
 	status = ocfs2_sync_dquot(dquot);
-	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
 	if (status < 0)
 		mlog_errno(status);
 	/* We have to write local structure as well... */
-	dquot_mark_dquot_dirty(dquot);
-	status = dquot_commit(dquot);
+	status = ocfs2_local_write_dquot(dquot);
 	if (status < 0)
 		mlog_errno(status);
+	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
 	ocfs2_commit_trans(osb, handle);
 out_ilock:
 	ocfs2_unlock_global_qf(oinfo, 1);
@@ -682,7 +657,9 @@
 		mlog_errno(status);
 		goto out;
 	}
-	status = dquot_commit(dquot);
+	mutex_lock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
+	status = ocfs2_local_write_dquot(dquot);
+	mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
 	ocfs2_commit_trans(osb, handle);
 out:
 	mlog_exit(status);
@@ -713,6 +690,10 @@
 
 	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
 
+	mutex_lock(&dquot->dq_lock);
+	/* Check whether we are not racing with some other dqget() */
+	if (atomic_read(&dquot->dq_count) > 1)
+		goto out;
 	status = ocfs2_lock_global_qf(oinfo, 1);
 	if (status < 0)
 		goto out;
@@ -723,30 +704,113 @@
 		mlog_errno(status);
 		goto out_ilock;
 	}
-	status = dquot_release(dquot);
+
+	status = ocfs2_global_release_dquot(dquot);
+	if (status < 0) {
+		mlog_errno(status);
+		goto out_trans;
+	}
+	status = ocfs2_local_release_dquot(handle, dquot);
+	/*
+	 * If we fail here, we cannot do much as global structure is
+	 * already released. So just complain...
+	 */
+	if (status < 0)
+		mlog_errno(status);
+	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+out_trans:
 	ocfs2_commit_trans(osb, handle);
 out_ilock:
 	ocfs2_unlock_global_qf(oinfo, 1);
 out:
+	mutex_unlock(&dquot->dq_lock);
 	mlog_exit(status);
 	return status;
 }
 
+/*
+ * Read global dquot structure from disk or create it if it does
+ * not exist. Also update use count of the global structure and
+ * create structure in node-local quota file.
+ */
 static int ocfs2_acquire_dquot(struct dquot *dquot)
 {
-	struct ocfs2_mem_dqinfo *oinfo =
-			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
-	int status = 0;
+	int status = 0, err;
+	int ex = 0;
+	struct super_block *sb = dquot->dq_sb;
+	struct ocfs2_super *osb = OCFS2_SB(sb);
+	int type = dquot->dq_type;
+	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
+	struct inode *gqinode = info->dqi_gqinode;
+	int need_alloc = ocfs2_global_qinit_alloc(sb, type);
+	handle_t *handle;
 
-	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
-	/* We need an exclusive lock, because we're going to update use count
-	 * and instantiate possibly new dquot structure */
-	status = ocfs2_lock_global_qf(oinfo, 1);
+	mlog_entry("id=%u, type=%d", dquot->dq_id, type);
+	mutex_lock(&dquot->dq_lock);
+	/*
+	 * We need an exclusive lock, because we're going to update use count
+	 * and instantiate possibly new dquot structure
+	 */
+	status = ocfs2_lock_global_qf(info, 1);
 	if (status < 0)
 		goto out;
-	status = dquot_acquire(dquot);
-	ocfs2_unlock_global_qf(oinfo, 1);
+	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
+		status = ocfs2_qinfo_lock(info, 0);
+		if (status < 0)
+			goto out_dq;
+		status = qtree_read_dquot(&info->dqi_gi, dquot);
+		ocfs2_qinfo_unlock(info, 0);
+		if (status < 0)
+			goto out_dq;
+	}
+	set_bit(DQ_READ_B, &dquot->dq_flags);
+
+	OCFS2_DQUOT(dquot)->dq_use_count++;
+	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
+	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
+	if (!dquot->dq_off) {	/* No real quota entry? */
+		ex = 1;
+		/*
+		 * Add blocks to quota file before we start a transaction since
+		 * locking allocators ranks above a transaction start
+		 */
+		WARN_ON(journal_current_handle());
+		status = ocfs2_extend_no_holes(gqinode,
+			gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
+			gqinode->i_size);
+		if (status < 0)
+			goto out_dq;
+	}
+
+	handle = ocfs2_start_trans(osb,
+				   ocfs2_calc_global_qinit_credits(sb, type));
+	if (IS_ERR(handle)) {
+		status = PTR_ERR(handle);
+		goto out_dq;
+	}
+	status = ocfs2_qinfo_lock(info, ex);
+	if (status < 0)
+		goto out_trans;
+	status = qtree_write_dquot(&info->dqi_gi, dquot);
+	if (ex && info_dirty(sb_dqinfo(sb, type))) {
+		err = __ocfs2_global_write_info(sb, type);
+		if (!status)
+			status = err;
+	}
+	ocfs2_qinfo_unlock(info, ex);
+out_trans:
+	ocfs2_commit_trans(osb, handle);
+out_dq:
+	ocfs2_unlock_global_qf(info, 1);
+	if (status < 0)
+		goto out;
+
+	status = ocfs2_create_local_dquot(dquot);
+	if (status < 0)
+		goto out;
+	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out:
+	mutex_unlock(&dquot->dq_lock);
 	mlog_exit(status);
 	return status;
 }
@@ -768,7 +832,6 @@
 	struct ocfs2_super *osb = OCFS2_SB(sb);
 
 	mlog_entry("id=%u, type=%d", dquot->dq_id, type);
-	dquot_mark_dquot_dirty(dquot);
 
 	/* In case user set some limits, sync dquot immediately to global
 	 * quota file so that information propagates quicker */
@@ -791,14 +854,16 @@
 		mlog_errno(status);
 		goto out_ilock;
 	}
+	mutex_lock(&sb_dqopt(sb)->dqio_mutex);
 	status = ocfs2_sync_dquot(dquot);
 	if (status < 0) {
 		mlog_errno(status);
-		goto out_trans;
+		goto out_dlock;
 	}
 	/* Now write updated local dquot structure */
-	status = dquot_commit(dquot);
-out_trans:
+	status = ocfs2_local_write_dquot(dquot);
+out_dlock:
+	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
 	ocfs2_commit_trans(osb, handle);
 out_ilock:
 	ocfs2_unlock_global_qf(oinfo, 1);
@@ -850,7 +915,7 @@
 }
 
 const struct dquot_operations ocfs2_quota_operations = {
-	.write_dquot	= ocfs2_write_dquot,
+	/* We never make dquot dirty so .write_dquot is never called */
 	.acquire_dquot	= ocfs2_acquire_dquot,
 	.release_dquot	= ocfs2_release_dquot,
 	.mark_dirty	= ocfs2_mark_dquot_dirty,
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 884b641..8bd70d4 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -22,6 +22,7 @@
 #include "dlmglue.h"
 #include "quota.h"
 #include "uptodate.h"
+#include "super.h"
 
 /* Number of local quota structures per block */
 static inline unsigned int ol_quota_entries_per_block(struct super_block *sb)
@@ -129,6 +130,39 @@
 	return 0;
 }
 
+/*
+ * Read quota block from a given logical offset.
+ *
+ * This function acquires ip_alloc_sem and thus it must not be called with a
+ * transaction started.
+ */
+static int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
+				  struct buffer_head **bh)
+{
+	int rc = 0;
+	struct buffer_head *tmp = *bh;
+
+	if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
+		ocfs2_error(inode->i_sb,
+			    "Quota file %llu is probably corrupted! Requested "
+			    "to read block %Lu but file has size only %Lu\n",
+			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
+			    (unsigned long long)v_block,
+			    (unsigned long long)i_size_read(inode));
+		return -EIO;
+	}
+	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
+				    ocfs2_validate_quota_block);
+	if (rc)
+		mlog_errno(rc);
+
+	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
+	if (!rc && !*bh)
+		*bh = tmp;
+
+	return rc;
+}
+
 /* Check whether we understand format of quota files */
 static int ocfs2_local_check_quota_file(struct super_block *sb, int type)
 {
@@ -671,7 +705,7 @@
 	INIT_LIST_HEAD(&oinfo->dqi_chunk);
 	oinfo->dqi_rec = NULL;
 	oinfo->dqi_lqi_bh = NULL;
-	oinfo->dqi_ibh = NULL;
+	oinfo->dqi_libh = NULL;
 
 	status = ocfs2_global_read_info(sb, type);
 	if (status < 0)
@@ -697,7 +731,7 @@
 	info->dqi_flags = le32_to_cpu(ldinfo->dqi_flags);
 	oinfo->dqi_chunks = le32_to_cpu(ldinfo->dqi_chunks);
 	oinfo->dqi_blocks = le32_to_cpu(ldinfo->dqi_blocks);
-	oinfo->dqi_ibh = bh;
+	oinfo->dqi_libh = bh;
 
 	/* We crashed when using local quota file? */
 	if (!(info->dqi_flags & OLQF_CLEAN)) {
@@ -759,7 +793,7 @@
 {
 	struct mem_dqinfo *info = sb_dqinfo(sb, type);
 	struct buffer_head *bh = ((struct ocfs2_mem_dqinfo *)info->dqi_priv)
-						->dqi_ibh;
+						->dqi_libh;
 	int status;
 
 	status = ocfs2_modify_bh(sb_dqopt(sb)->files[type], bh, olq_update_info,
@@ -782,10 +816,6 @@
 	int mark_clean = 1, len;
 	int status;
 
-	/* At this point we know there are no more dquots and thus
-	 * even if there's some sync in the pdflush queue, it won't
-	 * find any dquots and return without doing anything */
-	cancel_delayed_work_sync(&oinfo->dqi_sync_work);
 	iput(oinfo->dqi_gqinode);
 	ocfs2_simple_drop_lockres(OCFS2_SB(sb), &oinfo->dqi_gqlock);
 	ocfs2_lock_res_free(&oinfo->dqi_gqlock);
@@ -820,7 +850,7 @@
 	/* Mark local file as clean */
 	info->dqi_flags |= OLQF_CLEAN;
 	status = ocfs2_modify_bh(sb_dqopt(sb)->files[type],
-				 oinfo->dqi_ibh,
+				 oinfo->dqi_libh,
 				 olq_update_info,
 				 info);
 	if (status < 0) {
@@ -830,7 +860,7 @@
 
 out:
 	ocfs2_inode_unlock(sb_dqopt(sb)->files[type], 1);
-	brelse(oinfo->dqi_ibh);
+	brelse(oinfo->dqi_libh);
 	brelse(oinfo->dqi_lqi_bh);
 	kfree(oinfo);
 	return 0;
@@ -858,22 +888,21 @@
 }
 
 /* Write dquot to local quota file */
-static int ocfs2_local_write_dquot(struct dquot *dquot)
+int ocfs2_local_write_dquot(struct dquot *dquot)
 {
 	struct super_block *sb = dquot->dq_sb;
 	struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
-	struct buffer_head *bh = NULL;
+	struct buffer_head *bh;
+	struct inode *lqinode = sb_dqopt(sb)->files[dquot->dq_type];
 	int status;
 
-	status = ocfs2_read_quota_block(sb_dqopt(sb)->files[dquot->dq_type],
-				    ol_dqblk_file_block(sb, od->dq_local_off),
-				    &bh);
+	status = ocfs2_read_quota_phys_block(lqinode, od->dq_local_phys_blk,
+					     &bh);
 	if (status) {
 		mlog_errno(status);
 		goto out;
 	}
-	status = ocfs2_modify_bh(sb_dqopt(sb)->files[dquot->dq_type], bh,
-				 olq_set_dquot, od);
+	status = ocfs2_modify_bh(lqinode, bh, olq_set_dquot, od);
 	if (status < 0) {
 		mlog_errno(status);
 		goto out;
@@ -973,10 +1002,8 @@
 	}
 
 	/* Initialize chunk header */
-	down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
 	status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks,
 					     &p_blkno, NULL, NULL);
-	up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
 	if (status < 0) {
 		mlog_errno(status);
 		goto out_trans;
@@ -1004,10 +1031,8 @@
 	ocfs2_journal_dirty(handle, bh);
 
 	/* Initialize new block with structures */
-	down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
 	status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks + 1,
 					     &p_blkno, NULL, NULL);
-	up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
 	if (status < 0) {
 		mlog_errno(status);
 		goto out_trans;
@@ -1104,10 +1129,8 @@
 	}
 
 	/* Get buffer from the just added block */
-	down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
 	status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks,
 					     &p_blkno, NULL, NULL);
-	up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
 	if (status < 0) {
 		mlog_errno(status);
 		goto out;
@@ -1188,7 +1211,7 @@
 }
 
 /* Create dquot in the local file for given id */
-static int ocfs2_create_local_dquot(struct dquot *dquot)
+int ocfs2_create_local_dquot(struct dquot *dquot)
 {
 	struct super_block *sb = dquot->dq_sb;
 	int type = dquot->dq_type;
@@ -1197,17 +1220,27 @@
 	struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
 	int offset;
 	int status;
+	u64 pcount;
 
+	down_write(&OCFS2_I(lqinode)->ip_alloc_sem);
 	chunk = ocfs2_find_free_entry(sb, type, &offset);
 	if (!chunk) {
 		chunk = ocfs2_extend_local_quota_file(sb, type, &offset);
-		if (IS_ERR(chunk))
-			return PTR_ERR(chunk);
+		if (IS_ERR(chunk)) {
+			status = PTR_ERR(chunk);
+			goto out;
+		}
 	} else if (IS_ERR(chunk)) {
-		return PTR_ERR(chunk);
+		status = PTR_ERR(chunk);
+		goto out;
 	}
 	od->dq_local_off = ol_dqblk_off(sb, chunk->qc_num, offset);
 	od->dq_chunk = chunk;
+	status = ocfs2_extent_map_get_blocks(lqinode,
+				     ol_dqblk_block(sb, chunk->qc_num, offset),
+				     &od->dq_local_phys_blk,
+				     &pcount,
+				     NULL);
 
 	/* Initialize dquot structure on disk */
 	status = ocfs2_local_write_dquot(dquot);
@@ -1224,39 +1257,15 @@
 		goto out;
 	}
 out:
+	up_write(&OCFS2_I(lqinode)->ip_alloc_sem);
 	return status;
 }
 
-/* Create entry in local file for dquot, load data from the global file */
-static int ocfs2_local_read_dquot(struct dquot *dquot)
-{
-	int status;
-
-	mlog_entry("id=%u, type=%d\n", dquot->dq_id, dquot->dq_type);
-
-	status = ocfs2_global_read_dquot(dquot);
-	if (status < 0) {
-		mlog_errno(status);
-		goto out_err;
-	}
-
-	/* Now create entry in the local quota file */
-	status = ocfs2_create_local_dquot(dquot);
-	if (status < 0) {
-		mlog_errno(status);
-		goto out_err;
-	}
-	mlog_exit(0);
-	return 0;
-out_err:
-	mlog_exit(status);
-	return status;
-}
-
-/* Release dquot structure from local quota file. ocfs2_release_dquot() has
- * already started a transaction and obtained exclusive lock for global
- * quota file. */
-static int ocfs2_local_release_dquot(struct dquot *dquot)
+/*
+ * Release dquot structure from local quota file. ocfs2_release_dquot() has
+ * already started a transaction and written all changes to global quota file
+ */
+int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
 {
 	int status;
 	int type = dquot->dq_type;
@@ -1264,15 +1273,6 @@
 	struct super_block *sb = dquot->dq_sb;
 	struct ocfs2_local_disk_chunk *dchunk;
 	int offset;
-	handle_t *handle = journal_current_handle();
-
-	BUG_ON(!handle);
-	/* First write all local changes to global file */
-	status = ocfs2_global_release_dquot(dquot);
-	if (status < 0) {
-		mlog_errno(status);
-		goto out;
-	}
 
 	status = ocfs2_journal_access_dq(handle,
 			INODE_CACHE(sb_dqopt(sb)->files[type]),
@@ -1305,9 +1305,6 @@
 	.read_file_info		= ocfs2_local_read_info,
 	.write_file_info	= ocfs2_global_write_info,
 	.free_file_info		= ocfs2_local_free_info,
-	.read_dqblk		= ocfs2_local_read_dquot,
-	.commit_dqblk		= ocfs2_local_write_dquot,
-	.release_dqblk		= ocfs2_local_release_dquot,
 };
 
 struct quota_format_type ocfs2_quota_format = {
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 1c2c39f..2c26ce2 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -938,12 +938,16 @@
 	int type;
 	struct inode *inode;
 	struct super_block *sb = osb->sb;
+	struct ocfs2_mem_dqinfo *oinfo;
 
 	/* We mostly ignore errors in this function because there's not much
 	 * we can do when we see them */
 	for (type = 0; type < MAXQUOTAS; type++) {
 		if (!sb_has_quota_loaded(sb, type))
 			continue;
+		/* Cancel periodic syncing before we grab dqonoff_mutex */
+		oinfo = sb_dqinfo(sb, type)->dqi_priv;
+		cancel_delayed_work_sync(&oinfo->dqi_sync_work);
 		inode = igrab(sb->s_dquot.files[type]);
 		/* Turn off quotas. This will remove all dquot structures from
 		 * memory and so they will be automatically synced to global
diff --git a/fs/partitions/acorn.c b/fs/partitions/acorn.c
index a97b477..6921e78 100644
--- a/fs/partitions/acorn.c
+++ b/fs/partitions/acorn.c
@@ -70,14 +70,14 @@
 
 #if defined(CONFIG_ACORN_PARTITION_CUMANA) || \
 	defined(CONFIG_ACORN_PARTITION_ADFS)
-static int
-riscix_partition(struct parsed_partitions *state, struct block_device *bdev,
-		unsigned long first_sect, int slot, unsigned long nr_sects)
+static int riscix_partition(struct parsed_partitions *state,
+			    unsigned long first_sect, int slot,
+			    unsigned long nr_sects)
 {
 	Sector sect;
 	struct riscix_record *rr;
 	
-	rr = (struct riscix_record *)read_dev_sector(bdev, first_sect, &sect);
+	rr = read_part_sector(state, first_sect, &sect);
 	if (!rr)
 		return -1;
 
@@ -123,9 +123,9 @@
 
 #if defined(CONFIG_ACORN_PARTITION_CUMANA) || \
 	defined(CONFIG_ACORN_PARTITION_ADFS)
-static int
-linux_partition(struct parsed_partitions *state, struct block_device *bdev,
-		unsigned long first_sect, int slot, unsigned long nr_sects)
+static int linux_partition(struct parsed_partitions *state,
+			   unsigned long first_sect, int slot,
+			   unsigned long nr_sects)
 {
 	Sector sect;
 	struct linux_part *linuxp;
@@ -135,7 +135,7 @@
 
 	put_partition(state, slot++, first_sect, size);
 
-	linuxp = (struct linux_part *)read_dev_sector(bdev, first_sect, &sect);
+	linuxp = read_part_sector(state, first_sect, &sect);
 	if (!linuxp)
 		return -1;
 
@@ -157,8 +157,7 @@
 #endif
 
 #ifdef CONFIG_ACORN_PARTITION_CUMANA
-int
-adfspart_check_CUMANA(struct parsed_partitions *state, struct block_device *bdev)
+int adfspart_check_CUMANA(struct parsed_partitions *state)
 {
 	unsigned long first_sector = 0;
 	unsigned int start_blk = 0;
@@ -185,7 +184,7 @@
 		struct adfs_discrecord *dr;
 		unsigned int nr_sects;
 
-		data = read_dev_sector(bdev, start_blk * 2 + 6, &sect);
+		data = read_part_sector(state, start_blk * 2 + 6, &sect);
 		if (!data)
 			return -1;
 
@@ -217,14 +216,14 @@
 #ifdef CONFIG_ACORN_PARTITION_RISCIX
 		case PARTITION_RISCIX_SCSI:
 			/* RISCiX - we don't know how to find the next one. */
-			slot = riscix_partition(state, bdev, first_sector,
-						 slot, nr_sects);
+			slot = riscix_partition(state, first_sector, slot,
+						nr_sects);
 			break;
 #endif
 
 		case PARTITION_LINUX:
-			slot = linux_partition(state, bdev, first_sector,
-						slot, nr_sects);
+			slot = linux_partition(state, first_sector, slot,
+					       nr_sects);
 			break;
 		}
 		put_dev_sector(sect);
@@ -249,8 +248,7 @@
  *	    hda1 = ADFS partition on first drive.
  *	    hda2 = non-ADFS partition.
  */
-int
-adfspart_check_ADFS(struct parsed_partitions *state, struct block_device *bdev)
+int adfspart_check_ADFS(struct parsed_partitions *state)
 {
 	unsigned long start_sect, nr_sects, sectscyl, heads;
 	Sector sect;
@@ -259,7 +257,7 @@
 	unsigned char id;
 	int slot = 1;
 
-	data = read_dev_sector(bdev, 6, &sect);
+	data = read_part_sector(state, 6, &sect);
 	if (!data)
 		return -1;
 
@@ -278,21 +276,21 @@
 	/*
 	 * Work out start of non-adfs partition.
 	 */
-	nr_sects = (bdev->bd_inode->i_size >> 9) - start_sect;
+	nr_sects = (state->bdev->bd_inode->i_size >> 9) - start_sect;
 
 	if (start_sect) {
 		switch (id) {
 #ifdef CONFIG_ACORN_PARTITION_RISCIX
 		case PARTITION_RISCIX_SCSI:
 		case PARTITION_RISCIX_MFM:
-			slot = riscix_partition(state, bdev, start_sect,
-						 slot, nr_sects);
+			slot = riscix_partition(state, start_sect, slot,
+						nr_sects);
 			break;
 #endif
 
 		case PARTITION_LINUX:
-			slot = linux_partition(state, bdev, start_sect,
-						slot, nr_sects);
+			slot = linux_partition(state, start_sect, slot,
+					       nr_sects);
 			break;
 		}
 	}
@@ -308,10 +306,11 @@
 	__le32 size;
 };
 
-static int adfspart_check_ICSLinux(struct block_device *bdev, unsigned long block)
+static int adfspart_check_ICSLinux(struct parsed_partitions *state,
+				   unsigned long block)
 {
 	Sector sect;
-	unsigned char *data = read_dev_sector(bdev, block, &sect);
+	unsigned char *data = read_part_sector(state, block, &sect);
 	int result = 0;
 
 	if (data) {
@@ -349,8 +348,7 @@
  *	    hda2 = ADFS partition 1 on first drive.
  *		..etc..
  */
-int
-adfspart_check_ICS(struct parsed_partitions *state, struct block_device *bdev)
+int adfspart_check_ICS(struct parsed_partitions *state)
 {
 	const unsigned char *data;
 	const struct ics_part *p;
@@ -360,7 +358,7 @@
 	/*
 	 * Try ICS style partitions - sector 0 contains partition info.
 	 */
-	data = read_dev_sector(bdev, 0, &sect);
+	data = read_part_sector(state, 0, &sect);
 	if (!data)
 	    	return -1;
 
@@ -392,7 +390,7 @@
 			 * partition is.  We must not make this visible
 			 * to the filesystem.
 			 */
-			if (size > 1 && adfspart_check_ICSLinux(bdev, start)) {
+			if (size > 1 && adfspart_check_ICSLinux(state, start)) {
 				start += 1;
 				size -= 1;
 			}
@@ -446,8 +444,7 @@
  *	    hda2 = ADFS partition 1 on first drive.
  *		..etc..
  */
-int
-adfspart_check_POWERTEC(struct parsed_partitions *state, struct block_device *bdev)
+int adfspart_check_POWERTEC(struct parsed_partitions *state)
 {
 	Sector sect;
 	const unsigned char *data;
@@ -455,7 +452,7 @@
 	int slot = 1;
 	int i;
 
-	data = read_dev_sector(bdev, 0, &sect);
+	data = read_part_sector(state, 0, &sect);
 	if (!data)
 		return -1;
 
@@ -508,8 +505,7 @@
  *  1. The individual ADFS boot block entries that are placed on the disk.
  *  2. The start address of the next entry.
  */
-int
-adfspart_check_EESOX(struct parsed_partitions *state, struct block_device *bdev)
+int adfspart_check_EESOX(struct parsed_partitions *state)
 {
 	Sector sect;
 	const unsigned char *data;
@@ -518,7 +514,7 @@
 	sector_t start = 0;
 	int i, slot = 1;
 
-	data = read_dev_sector(bdev, 7, &sect);
+	data = read_part_sector(state, 7, &sect);
 	if (!data)
 		return -1;
 
@@ -545,7 +541,7 @@
 	if (i != 0) {
 		sector_t size;
 
-		size = get_capacity(bdev->bd_disk);
+		size = get_capacity(state->bdev->bd_disk);
 		put_partition(state, slot++, start, size - start);
 		printk("\n");
 	}
diff --git a/fs/partitions/acorn.h b/fs/partitions/acorn.h
index 81fd50e..ede8285 100644
--- a/fs/partitions/acorn.h
+++ b/fs/partitions/acorn.h
@@ -7,8 +7,8 @@
  *  format, and everyone stick to it?
  */
 
-int adfspart_check_CUMANA(struct parsed_partitions *state, struct block_device *bdev);
-int adfspart_check_ADFS(struct parsed_partitions *state, struct block_device *bdev);
-int adfspart_check_ICS(struct parsed_partitions *state, struct block_device *bdev);
-int adfspart_check_POWERTEC(struct parsed_partitions *state, struct block_device *bdev);
-int adfspart_check_EESOX(struct parsed_partitions *state, struct block_device *bdev);
+int adfspart_check_CUMANA(struct parsed_partitions *state);
+int adfspart_check_ADFS(struct parsed_partitions *state);
+int adfspart_check_ICS(struct parsed_partitions *state);
+int adfspart_check_POWERTEC(struct parsed_partitions *state);
+int adfspart_check_EESOX(struct parsed_partitions *state);
diff --git a/fs/partitions/amiga.c b/fs/partitions/amiga.c
index 9917a8c..ba443d4 100644
--- a/fs/partitions/amiga.c
+++ b/fs/partitions/amiga.c
@@ -23,8 +23,7 @@
 	return sum;
 }
 
-int
-amiga_partition(struct parsed_partitions *state, struct block_device *bdev)
+int amiga_partition(struct parsed_partitions *state)
 {
 	Sector sect;
 	unsigned char *data;
@@ -38,11 +37,11 @@
 	for (blk = 0; ; blk++, put_dev_sector(sect)) {
 		if (blk == RDB_ALLOCATION_LIMIT)
 			goto rdb_done;
-		data = read_dev_sector(bdev, blk, &sect);
+		data = read_part_sector(state, blk, &sect);
 		if (!data) {
 			if (warn_no_part)
 				printk("Dev %s: unable to read RDB block %d\n",
-				       bdevname(bdev, b), blk);
+				       bdevname(state->bdev, b), blk);
 			res = -1;
 			goto rdb_done;
 		}
@@ -64,7 +63,7 @@
 		}
 
 		printk("Dev %s: RDB in block %d has bad checksum\n",
-			       bdevname(bdev, b), blk);
+		       bdevname(state->bdev, b), blk);
 	}
 
 	/* blksize is blocks per 512 byte standard block */
@@ -75,11 +74,11 @@
 	put_dev_sector(sect);
 	for (part = 1; blk>0 && part<=16; part++, put_dev_sector(sect)) {
 		blk *= blksize;	/* Read in terms partition table understands */
-		data = read_dev_sector(bdev, blk, &sect);
+		data = read_part_sector(state, blk, &sect);
 		if (!data) {
 			if (warn_no_part)
 				printk("Dev %s: unable to read partition block %d\n",
-				       bdevname(bdev, b), blk);
+				       bdevname(state->bdev, b), blk);
 			res = -1;
 			goto rdb_done;
 		}
diff --git a/fs/partitions/amiga.h b/fs/partitions/amiga.h
index 2f3e9ce..d094585 100644
--- a/fs/partitions/amiga.h
+++ b/fs/partitions/amiga.h
@@ -2,5 +2,5 @@
  *  fs/partitions/amiga.h
  */
 
-int amiga_partition(struct parsed_partitions *state, struct block_device *bdev);
+int amiga_partition(struct parsed_partitions *state);
 
diff --git a/fs/partitions/atari.c b/fs/partitions/atari.c
index 1f3572d..4439ff1 100644
--- a/fs/partitions/atari.c
+++ b/fs/partitions/atari.c
@@ -30,7 +30,7 @@
 		memcmp (s, "RAW", 3) == 0 ;
 }
 
-int atari_partition(struct parsed_partitions *state, struct block_device *bdev)
+int atari_partition(struct parsed_partitions *state)
 {
 	Sector sect;
 	struct rootsector *rs;
@@ -42,12 +42,12 @@
 	int part_fmt = 0; /* 0:unknown, 1:AHDI, 2:ICD/Supra */
 #endif
 
-	rs = (struct rootsector *) read_dev_sector(bdev, 0, &sect);
+	rs = read_part_sector(state, 0, &sect);
 	if (!rs)
 		return -1;
 
 	/* Verify this is an Atari rootsector: */
-	hd_size = bdev->bd_inode->i_size >> 9;
+	hd_size = state->bdev->bd_inode->i_size >> 9;
 	if (!VALID_PARTITION(&rs->part[0], hd_size) &&
 	    !VALID_PARTITION(&rs->part[1], hd_size) &&
 	    !VALID_PARTITION(&rs->part[2], hd_size) &&
@@ -84,7 +84,7 @@
 		printk(" XGM<");
 		partsect = extensect = be32_to_cpu(pi->st);
 		while (1) {
-			xrs = (struct rootsector *)read_dev_sector(bdev, partsect, &sect2);
+			xrs = read_part_sector(state, partsect, &sect2);
 			if (!xrs) {
 				printk (" block %ld read failed\n", partsect);
 				put_dev_sector(sect);
diff --git a/fs/partitions/atari.h b/fs/partitions/atari.h
index 63186b0..fe2d32a 100644
--- a/fs/partitions/atari.h
+++ b/fs/partitions/atari.h
@@ -31,4 +31,4 @@
   u16 checksum;			/* checksum for bootable disks */
 } __attribute__((__packed__));
 
-int atari_partition(struct parsed_partitions *state, struct block_device *bdev);
+int atari_partition(struct parsed_partitions *state);
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index e238ab2..5dcd4b0 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -45,7 +45,7 @@
 
 int warn_no_part = 1; /*This is ugly: should make genhd removable media aware*/
 
-static int (*check_part[])(struct parsed_partitions *, struct block_device *) = {
+static int (*check_part[])(struct parsed_partitions *) = {
 	/*
 	 * Probe partition formats with tables at disk address 0
 	 * that also have an ADFS boot block at 0xdc0.
@@ -161,10 +161,11 @@
 	struct parsed_partitions *state;
 	int i, res, err;
 
-	state = kmalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
+	state = kzalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
 	if (!state)
 		return NULL;
 
+	state->bdev = bdev;
 	disk_name(hd, 0, state->name);
 	printk(KERN_INFO " %s:", state->name);
 	if (isdigit(state->name[strlen(state->name)-1]))
@@ -174,7 +175,7 @@
 	i = res = err = 0;
 	while (!res && check_part[i]) {
 		memset(&state->parts, 0, sizeof(state->parts));
-		res = check_part[i++](state, bdev);
+		res = check_part[i++](state);
 		if (res < 0) {
 			/* We have hit an I/O error which we don't report now.
 		 	* But record it, and let the others do their job.
@@ -186,6 +187,8 @@
 	}
 	if (res > 0)
 		return state;
+	if (state->access_beyond_eod)
+		err = -ENOSPC;
 	if (err)
 	/* The partition is unrecognized. So report I/O errors if there were any */
 		res = err;
@@ -538,12 +541,33 @@
 	disk_part_iter_exit(&piter);
 }
 
+static bool disk_unlock_native_capacity(struct gendisk *disk)
+{
+	const struct block_device_operations *bdops = disk->fops;
+
+	if (bdops->unlock_native_capacity &&
+	    !(disk->flags & GENHD_FL_NATIVE_CAPACITY)) {
+		printk(KERN_CONT "enabling native capacity\n");
+		bdops->unlock_native_capacity(disk);
+		disk->flags |= GENHD_FL_NATIVE_CAPACITY;
+		return true;
+	} else {
+		printk(KERN_CONT "truncated\n");
+		return false;
+	}
+}
+
 int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
 {
+	struct parsed_partitions *state = NULL;
 	struct disk_part_iter piter;
 	struct hd_struct *part;
-	struct parsed_partitions *state;
 	int p, highest, res;
+rescan:
+	if (state && !IS_ERR(state)) {
+		kfree(state);
+		state = NULL;
+	}
 
 	if (bdev->bd_part_count)
 		return -EBUSY;
@@ -562,8 +586,32 @@
 	bdev->bd_invalidated = 0;
 	if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
 		return 0;
-	if (IS_ERR(state))	/* I/O error reading the partition table */
+	if (IS_ERR(state)) {
+		/*
+		 * I/O error reading the partition table.  If any
+		 * partition code tried to read beyond EOD, retry
+		 * after unlocking native capacity.
+		 */
+		if (PTR_ERR(state) == -ENOSPC) {
+			printk(KERN_WARNING "%s: partition table beyond EOD, ",
+			       disk->disk_name);
+			if (disk_unlock_native_capacity(disk))
+				goto rescan;
+		}
 		return -EIO;
+	}
+	/*
+	 * If any partition code tried to read beyond EOD, try
+	 * unlocking native capacity even if partition table is
+	 * successfully read as we could be missing some partitions.
+	 */
+	if (state->access_beyond_eod) {
+		printk(KERN_WARNING
+		       "%s: partition table partially beyond EOD, ",
+		       disk->disk_name);
+		if (disk_unlock_native_capacity(disk))
+			goto rescan;
+	}
 
 	/* tell userspace that the media / partition table may have changed */
 	kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
@@ -581,7 +629,7 @@
 	/* add partitions */
 	for (p = 1; p < state->limit; p++) {
 		sector_t size, from;
-try_scan:
+
 		size = state->parts[p].size;
 		if (!size)
 			continue;
@@ -589,30 +637,21 @@
 		from = state->parts[p].from;
 		if (from >= get_capacity(disk)) {
 			printk(KERN_WARNING
-			       "%s: p%d ignored, start %llu is behind the end of the disk\n",
+			       "%s: p%d start %llu is beyond EOD, ",
 			       disk->disk_name, p, (unsigned long long) from);
+			if (disk_unlock_native_capacity(disk))
+				goto rescan;
 			continue;
 		}
 
 		if (from + size > get_capacity(disk)) {
-			const struct block_device_operations *bdops = disk->fops;
-			unsigned long long capacity;
-
 			printk(KERN_WARNING
-			       "%s: p%d size %llu exceeds device capacity, ",
+			       "%s: p%d size %llu extends beyond EOD, ",
 			       disk->disk_name, p, (unsigned long long) size);
 
-			if (bdops->set_capacity &&
-			    (disk->flags & GENHD_FL_NATIVE_CAPACITY) == 0) {
-				printk(KERN_CONT "enabling native capacity\n");
-				capacity = bdops->set_capacity(disk, ~0ULL);
-				disk->flags |= GENHD_FL_NATIVE_CAPACITY;
-				if (capacity > get_capacity(disk)) {
-					set_capacity(disk, capacity);
-					check_disk_size_change(disk, bdev);
-					bdev->bd_invalidated = 0;
-				}
-				goto try_scan;
+			if (disk_unlock_native_capacity(disk)) {
+				/* free state and restart */
+				goto rescan;
 			} else {
 				/*
 				 * we can not ignore partitions of broken tables
@@ -620,7 +659,6 @@
 				 * we limit them to the end of the disk to avoid
 				 * creating invalid block devices
 				 */
-				printk(KERN_CONT "limited to end of disk\n");
 				size = get_capacity(disk) - from;
 			}
 		}
diff --git a/fs/partitions/check.h b/fs/partitions/check.h
index 98dbe1a..52f8bd3 100644
--- a/fs/partitions/check.h
+++ b/fs/partitions/check.h
@@ -6,6 +6,7 @@
  * description.
  */
 struct parsed_partitions {
+	struct block_device *bdev;
 	char name[BDEVNAME_SIZE];
 	struct {
 		sector_t from;
@@ -14,8 +15,19 @@
 	} parts[DISK_MAX_PARTS];
 	int next;
 	int limit;
+	bool access_beyond_eod;
 };
 
+static inline void *read_part_sector(struct parsed_partitions *state,
+				     sector_t n, Sector *p)
+{
+	if (n >= get_capacity(state->bdev->bd_disk)) {
+		state->access_beyond_eod = true;
+		return NULL;
+	}
+	return read_dev_sector(state->bdev, n, p);
+}
+
 static inline void
 put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size)
 {
diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
index 91babda..9e346c1 100644
--- a/fs/partitions/efi.c
+++ b/fs/partitions/efi.c
@@ -140,8 +140,7 @@
  *  the part[0] entry for this disk, and is the number of
  *  physical sectors available on the disk.
  */
-static u64
-last_lba(struct block_device *bdev)
+static u64 last_lba(struct block_device *bdev)
 {
 	if (!bdev || !bdev->bd_inode)
 		return 0;
@@ -181,27 +180,28 @@
 
 /**
  * read_lba(): Read bytes from disk, starting at given LBA
- * @bdev
+ * @state
  * @lba
  * @buffer
  * @size_t
  *
- * Description:  Reads @count bytes from @bdev into @buffer.
+ * Description: Reads @count bytes from @state->bdev into @buffer.
  * Returns number of bytes read on success, 0 on error.
  */
-static size_t
-read_lba(struct block_device *bdev, u64 lba, u8 * buffer, size_t count)
+static size_t read_lba(struct parsed_partitions *state,
+		       u64 lba, u8 *buffer, size_t count)
 {
 	size_t totalreadcount = 0;
+	struct block_device *bdev = state->bdev;
 	sector_t n = lba * (bdev_logical_block_size(bdev) / 512);
 
-	if (!bdev || !buffer || lba > last_lba(bdev))
+	if (!buffer || lba > last_lba(bdev))
                 return 0;
 
 	while (count) {
 		int copied = 512;
 		Sector sect;
-		unsigned char *data = read_dev_sector(bdev, n++, &sect);
+		unsigned char *data = read_part_sector(state, n++, &sect);
 		if (!data)
 			break;
 		if (copied > count)
@@ -217,19 +217,20 @@
 
 /**
  * alloc_read_gpt_entries(): reads partition entries from disk
- * @bdev
+ * @state
  * @gpt - GPT header
  * 
  * Description: Returns ptes on success,  NULL on error.
  * Allocates space for PTEs based on information found in @gpt.
  * Notes: remember to free pte when you're done!
  */
-static gpt_entry *
-alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
+static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
+					 gpt_header *gpt)
 {
 	size_t count;
 	gpt_entry *pte;
-	if (!bdev || !gpt)
+
+	if (!gpt)
 		return NULL;
 
 	count = le32_to_cpu(gpt->num_partition_entries) *
@@ -240,7 +241,7 @@
 	if (!pte)
 		return NULL;
 
-	if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
+	if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
                      (u8 *) pte,
 		     count) < count) {
 		kfree(pte);
@@ -252,27 +253,24 @@
 
 /**
  * alloc_read_gpt_header(): Allocates GPT header, reads into it from disk
- * @bdev
+ * @state
  * @lba is the Logical Block Address of the partition table
  * 
  * Description: returns GPT header on success, NULL on error.   Allocates
- * and fills a GPT header starting at @ from @bdev.
+ * and fills a GPT header starting at @lba from @state->bdev.
  * Note: remember to free gpt when finished with it.
  */
-static gpt_header *
-alloc_read_gpt_header(struct block_device *bdev, u64 lba)
+static gpt_header *alloc_read_gpt_header(struct parsed_partitions *state,
+					 u64 lba)
 {
 	gpt_header *gpt;
-	unsigned ssz = bdev_logical_block_size(bdev);
-
-	if (!bdev)
-		return NULL;
+	unsigned ssz = bdev_logical_block_size(state->bdev);
 
 	gpt = kzalloc(ssz, GFP_KERNEL);
 	if (!gpt)
 		return NULL;
 
-	if (read_lba(bdev, lba, (u8 *) gpt, ssz) < ssz) {
+	if (read_lba(state, lba, (u8 *) gpt, ssz) < ssz) {
 		kfree(gpt);
                 gpt=NULL;
 		return NULL;
@@ -283,7 +281,7 @@
 
 /**
  * is_gpt_valid() - tests one GPT header and PTEs for validity
- * @bdev
+ * @state
  * @lba is the logical block address of the GPT header to test
  * @gpt is a GPT header ptr, filled on return.
  * @ptes is a PTEs ptr, filled on return.
@@ -291,16 +289,15 @@
  * Description: returns 1 if valid,  0 on error.
  * If valid, returns pointers to newly allocated GPT header and PTEs.
  */
-static int
-is_gpt_valid(struct block_device *bdev, u64 lba,
-	     gpt_header **gpt, gpt_entry **ptes)
+static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
+			gpt_header **gpt, gpt_entry **ptes)
 {
 	u32 crc, origcrc;
 	u64 lastlba;
 
-	if (!bdev || !gpt || !ptes)
+	if (!ptes)
 		return 0;
-	if (!(*gpt = alloc_read_gpt_header(bdev, lba)))
+	if (!(*gpt = alloc_read_gpt_header(state, lba)))
 		return 0;
 
 	/* Check the GUID Partition Table signature */
@@ -336,7 +333,7 @@
 	/* Check the first_usable_lba and last_usable_lba are
 	 * within the disk.
 	 */
-	lastlba = last_lba(bdev);
+	lastlba = last_lba(state->bdev);
 	if (le64_to_cpu((*gpt)->first_usable_lba) > lastlba) {
 		pr_debug("GPT: first_usable_lba incorrect: %lld > %lld\n",
 			 (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba),
@@ -350,7 +347,7 @@
 		goto fail;
 	}
 
-	if (!(*ptes = alloc_read_gpt_entries(bdev, *gpt)))
+	if (!(*ptes = alloc_read_gpt_entries(state, *gpt)))
 		goto fail;
 
 	/* Check the GUID Partition Entry Array CRC */
@@ -495,7 +492,7 @@
 
 /**
  * find_valid_gpt() - Search disk for valid GPT headers and PTEs
- * @bdev
+ * @state
  * @gpt is a GPT header ptr, filled on return.
  * @ptes is a PTEs ptr, filled on return.
  * Description: Returns 1 if valid, 0 on error.
@@ -508,24 +505,25 @@
  * This protects against devices which misreport their size, and forces
  * the user to decide to use the Alternate GPT.
  */
-static int
-find_valid_gpt(struct block_device *bdev, gpt_header **gpt, gpt_entry **ptes)
+static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
+			  gpt_entry **ptes)
 {
 	int good_pgpt = 0, good_agpt = 0, good_pmbr = 0;
 	gpt_header *pgpt = NULL, *agpt = NULL;
 	gpt_entry *pptes = NULL, *aptes = NULL;
 	legacy_mbr *legacymbr;
 	u64 lastlba;
-	if (!bdev || !gpt || !ptes)
+
+	if (!ptes)
 		return 0;
 
-	lastlba = last_lba(bdev);
+	lastlba = last_lba(state->bdev);
         if (!force_gpt) {
                 /* This will be added to the EFI Spec. per Intel after v1.02. */
                 legacymbr = kzalloc(sizeof (*legacymbr), GFP_KERNEL);
                 if (legacymbr) {
-                        read_lba(bdev, 0, (u8 *) legacymbr,
-                                 sizeof (*legacymbr));
+                        read_lba(state, 0, (u8 *) legacymbr,
+				 sizeof (*legacymbr));
                         good_pmbr = is_pmbr_valid(legacymbr);
                         kfree(legacymbr);
                 }
@@ -533,15 +531,14 @@
                         goto fail;
         }
 
-	good_pgpt = is_gpt_valid(bdev, GPT_PRIMARY_PARTITION_TABLE_LBA,
+	good_pgpt = is_gpt_valid(state, GPT_PRIMARY_PARTITION_TABLE_LBA,
 				 &pgpt, &pptes);
         if (good_pgpt)
-		good_agpt = is_gpt_valid(bdev,
+		good_agpt = is_gpt_valid(state,
 					 le64_to_cpu(pgpt->alternate_lba),
 					 &agpt, &aptes);
         if (!good_agpt && force_gpt)
-                good_agpt = is_gpt_valid(bdev, lastlba,
-                                         &agpt, &aptes);
+                good_agpt = is_gpt_valid(state, lastlba, &agpt, &aptes);
 
         /* The obviously unsuccessful case */
         if (!good_pgpt && !good_agpt)
@@ -583,9 +580,8 @@
 }
 
 /**
- * efi_partition(struct parsed_partitions *state, struct block_device *bdev)
+ * efi_partition(struct parsed_partitions *state)
  * @state
- * @bdev
  *
  * Description: called from check.c, if the disk contains GPT
  * partitions, sets up partition entries in the kernel.
@@ -602,15 +598,14 @@
  *  1 if successful
  *
  */
-int
-efi_partition(struct parsed_partitions *state, struct block_device *bdev)
+int efi_partition(struct parsed_partitions *state)
 {
 	gpt_header *gpt = NULL;
 	gpt_entry *ptes = NULL;
 	u32 i;
-	unsigned ssz = bdev_logical_block_size(bdev) / 512;
+	unsigned ssz = bdev_logical_block_size(state->bdev) / 512;
 
-	if (!find_valid_gpt(bdev, &gpt, &ptes) || !gpt || !ptes) {
+	if (!find_valid_gpt(state, &gpt, &ptes) || !gpt || !ptes) {
 		kfree(gpt);
 		kfree(ptes);
 		return 0;
@@ -623,7 +618,7 @@
 		u64 size = le64_to_cpu(ptes[i].ending_lba) -
 			   le64_to_cpu(ptes[i].starting_lba) + 1ULL;
 
-		if (!is_pte_valid(&ptes[i], last_lba(bdev)))
+		if (!is_pte_valid(&ptes[i], last_lba(state->bdev)))
 			continue;
 
 		put_partition(state, i+1, start * ssz, size * ssz);
diff --git a/fs/partitions/efi.h b/fs/partitions/efi.h
index 6998b589..b69ab72 100644
--- a/fs/partitions/efi.h
+++ b/fs/partitions/efi.h
@@ -110,7 +110,7 @@
 } __attribute__ ((packed)) legacy_mbr;
 
 /* Functions */
-extern int efi_partition(struct parsed_partitions *state, struct block_device *bdev);
+extern int efi_partition(struct parsed_partitions *state);
 
 #endif
 
diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c
index fc71aab..3e73de5 100644
--- a/fs/partitions/ibm.c
+++ b/fs/partitions/ibm.c
@@ -58,9 +58,9 @@
 
 /*
  */
-int
-ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
+int ibm_partition(struct parsed_partitions *state)
 {
+	struct block_device *bdev = state->bdev;
 	int blocksize, res;
 	loff_t i_size, offset, size, fmt_size;
 	dasd_information2_t *info;
@@ -100,7 +100,8 @@
 	/*
 	 * Get volume label, extract name and type.
 	 */
-	data = read_dev_sector(bdev, info->label_block*(blocksize/512), &sect);
+	data = read_part_sector(state, info->label_block*(blocksize/512),
+				&sect);
 	if (data == NULL)
 		goto out_readerr;
 
@@ -193,8 +194,8 @@
 			 */
 			blk = cchhb2blk(&label->vol.vtoc, geo) + 1;
 			counter = 0;
-			data = read_dev_sector(bdev, blk * (blocksize/512),
-					       &sect);
+			data = read_part_sector(state, blk * (blocksize/512),
+						&sect);
 			while (data != NULL) {
 				struct vtoc_format1_label f1;
 
@@ -208,9 +209,8 @@
 				    || f1.DS1FMTID == _ascebc['7']
 				    || f1.DS1FMTID == _ascebc['9']) {
 					blk++;
-					data = read_dev_sector(bdev, blk *
-							       (blocksize/512),
-								&sect);
+					data = read_part_sector(state,
+						blk * (blocksize/512), &sect);
 					continue;
 				}
 
@@ -230,9 +230,8 @@
 					      size * (blocksize >> 9));
 				counter++;
 				blk++;
-				data = read_dev_sector(bdev,
-						       blk * (blocksize/512),
-						       &sect);
+				data = read_part_sector(state,
+						blk * (blocksize/512), &sect);
 			}
 
 			if (!data)
diff --git a/fs/partitions/ibm.h b/fs/partitions/ibm.h
index 31f85a6..08fb080 100644
--- a/fs/partitions/ibm.h
+++ b/fs/partitions/ibm.h
@@ -1 +1 @@
-int ibm_partition(struct parsed_partitions *, struct block_device *);
+int ibm_partition(struct parsed_partitions *);
diff --git a/fs/partitions/karma.c b/fs/partitions/karma.c
index 176d89b..1cc928b 100644
--- a/fs/partitions/karma.c
+++ b/fs/partitions/karma.c
@@ -9,7 +9,7 @@
 #include "check.h"
 #include "karma.h"
 
-int karma_partition(struct parsed_partitions *state, struct block_device *bdev)
+int karma_partition(struct parsed_partitions *state)
 {
 	int i;
 	int slot = 1;
@@ -29,7 +29,7 @@
 	} __attribute__((packed)) *label;
 	struct d_partition *p;
 
-	data = read_dev_sector(bdev, 0, &sect);
+	data = read_part_sector(state, 0, &sect);
 	if (!data)
 		return -1;
 
diff --git a/fs/partitions/karma.h b/fs/partitions/karma.h
index ecf7d3f..c764b2e 100644
--- a/fs/partitions/karma.h
+++ b/fs/partitions/karma.h
@@ -4,5 +4,5 @@
 
 #define KARMA_LABEL_MAGIC		0xAB56
 
-int karma_partition(struct parsed_partitions *state, struct block_device *bdev);
+int karma_partition(struct parsed_partitions *state);
 
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index 8652fb9..3ceca05 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -309,7 +309,7 @@
 
 /**
  * ldm_validate_privheads - Compare the primary privhead with its backups
- * @bdev:  Device holding the LDM Database
+ * @state: Partition check state including device holding the LDM Database
  * @ph1:   Memory struct to fill with ph contents
  *
  * Read and compare all three privheads from disk.
@@ -321,8 +321,8 @@
  * Return:  'true'   Success
  *          'false'  Error
  */
-static bool ldm_validate_privheads (struct block_device *bdev,
-				    struct privhead *ph1)
+static bool ldm_validate_privheads(struct parsed_partitions *state,
+				   struct privhead *ph1)
 {
 	static const int off[3] = { OFF_PRIV1, OFF_PRIV2, OFF_PRIV3 };
 	struct privhead *ph[3] = { ph1 };
@@ -332,7 +332,7 @@
 	long num_sects;
 	int i;
 
-	BUG_ON (!bdev || !ph1);
+	BUG_ON (!state || !ph1);
 
 	ph[1] = kmalloc (sizeof (*ph[1]), GFP_KERNEL);
 	ph[2] = kmalloc (sizeof (*ph[2]), GFP_KERNEL);
@@ -346,8 +346,8 @@
 
 	/* Read and parse privheads */
 	for (i = 0; i < 3; i++) {
-		data = read_dev_sector (bdev,
-			ph[0]->config_start + off[i], &sect);
+		data = read_part_sector(state, ph[0]->config_start + off[i],
+					&sect);
 		if (!data) {
 			ldm_crit ("Disk read failed.");
 			goto out;
@@ -363,7 +363,7 @@
 		}
 	}
 
-	num_sects = bdev->bd_inode->i_size >> 9;
+	num_sects = state->bdev->bd_inode->i_size >> 9;
 
 	if ((ph[0]->config_start > num_sects) ||
 	   ((ph[0]->config_start + ph[0]->config_size) > num_sects)) {
@@ -397,20 +397,20 @@
 
 /**
  * ldm_validate_tocblocks - Validate the table of contents and its backups
- * @bdev:  Device holding the LDM Database
- * @base:  Offset, into @bdev, of the database
+ * @state: Partition check state including device holding the LDM Database
+ * @base:  Offset, into @state->bdev, of the database
  * @ldb:   Cache of the database structures
  *
  * Find and compare the four tables of contents of the LDM Database stored on
- * @bdev and return the parsed information into @toc1.
+ * @state->bdev and return the parsed information into @toc1.
  *
  * The offsets and sizes of the configs are range-checked against a privhead.
  *
  * Return:  'true'   @toc1 contains validated TOCBLOCK info
  *          'false'  @toc1 contents are undefined
  */
-static bool ldm_validate_tocblocks(struct block_device *bdev,
-	unsigned long base, struct ldmdb *ldb)
+static bool ldm_validate_tocblocks(struct parsed_partitions *state,
+				   unsigned long base, struct ldmdb *ldb)
 {
 	static const int off[4] = { OFF_TOCB1, OFF_TOCB2, OFF_TOCB3, OFF_TOCB4};
 	struct tocblock *tb[4];
@@ -420,7 +420,7 @@
 	int i, nr_tbs;
 	bool result = false;
 
-	BUG_ON(!bdev || !ldb);
+	BUG_ON(!state || !ldb);
 	ph = &ldb->ph;
 	tb[0] = &ldb->toc;
 	tb[1] = kmalloc(sizeof(*tb[1]) * 3, GFP_KERNEL);
@@ -437,7 +437,7 @@
 	 * skip any that fail as long as we get at least one valid TOCBLOCK.
 	 */
 	for (nr_tbs = i = 0; i < 4; i++) {
-		data = read_dev_sector(bdev, base + off[i], &sect);
+		data = read_part_sector(state, base + off[i], &sect);
 		if (!data) {
 			ldm_error("Disk read failed for TOCBLOCK %d.", i);
 			continue;
@@ -473,7 +473,7 @@
 
 /**
  * ldm_validate_vmdb - Read the VMDB and validate it
- * @bdev:  Device holding the LDM Database
+ * @state: Partition check state including device holding the LDM Database
  * @base:  Offset, into @bdev, of the database
  * @ldb:   Cache of the database structures
  *
@@ -483,8 +483,8 @@
  * Return:  'true'   @ldb contains validated VBDB info
  *          'false'  @ldb contents are undefined
  */
-static bool ldm_validate_vmdb (struct block_device *bdev, unsigned long base,
-			       struct ldmdb *ldb)
+static bool ldm_validate_vmdb(struct parsed_partitions *state,
+			      unsigned long base, struct ldmdb *ldb)
 {
 	Sector sect;
 	u8 *data;
@@ -492,12 +492,12 @@
 	struct vmdb *vm;
 	struct tocblock *toc;
 
-	BUG_ON (!bdev || !ldb);
+	BUG_ON (!state || !ldb);
 
 	vm  = &ldb->vm;
 	toc = &ldb->toc;
 
-	data = read_dev_sector (bdev, base + OFF_VMDB, &sect);
+	data = read_part_sector(state, base + OFF_VMDB, &sect);
 	if (!data) {
 		ldm_crit ("Disk read failed.");
 		return false;
@@ -534,21 +534,21 @@
 
 /**
  * ldm_validate_partition_table - Determine whether bdev might be a dynamic disk
- * @bdev:  Device holding the LDM Database
+ * @state: Partition check state including device holding the LDM Database
  *
  * This function provides a weak test to decide whether the device is a dynamic
  * disk or not.  It looks for an MS-DOS-style partition table containing at
  * least one partition of type 0x42 (formerly SFS, now used by Windows for
  * dynamic disks).
  *
- * N.B.  The only possible error can come from the read_dev_sector and that is
+ * N.B.  The only possible error can come from the read_part_sector and that is
  *       only likely to happen if the underlying device is strange.  If that IS
  *       the case we should return zero to let someone else try.
  *
- * Return:  'true'   @bdev is a dynamic disk
- *          'false'  @bdev is not a dynamic disk, or an error occurred
+ * Return:  'true'   @state->bdev is a dynamic disk
+ *          'false'  @state->bdev is not a dynamic disk, or an error occurred
  */
-static bool ldm_validate_partition_table (struct block_device *bdev)
+static bool ldm_validate_partition_table(struct parsed_partitions *state)
 {
 	Sector sect;
 	u8 *data;
@@ -556,9 +556,9 @@
 	int i;
 	bool result = false;
 
-	BUG_ON (!bdev);
+	BUG_ON(!state);
 
-	data = read_dev_sector (bdev, 0, &sect);
+	data = read_part_sector(state, 0, &sect);
 	if (!data) {
 		ldm_crit ("Disk read failed.");
 		return false;
@@ -1391,8 +1391,8 @@
 
 /**
  * ldm_get_vblks - Read the on-disk database of VBLKs into memory
- * @bdev:  Device holding the LDM Database
- * @base:  Offset, into @bdev, of the database
+ * @state: Partition check state including device holding the LDM Database
+ * @base:  Offset, into @state->bdev, of the database
  * @ldb:   Cache of the database structures
  *
  * To use the information from the VBLKs, they need to be read from the disk,
@@ -1401,8 +1401,8 @@
  * Return:  'true'   All the VBLKs were read successfully
  *          'false'  An error occurred
  */
-static bool ldm_get_vblks (struct block_device *bdev, unsigned long base,
-			   struct ldmdb *ldb)
+static bool ldm_get_vblks(struct parsed_partitions *state, unsigned long base,
+			  struct ldmdb *ldb)
 {
 	int size, perbuf, skip, finish, s, v, recs;
 	u8 *data = NULL;
@@ -1410,7 +1410,7 @@
 	bool result = false;
 	LIST_HEAD (frags);
 
-	BUG_ON (!bdev || !ldb);
+	BUG_ON(!state || !ldb);
 
 	size   = ldb->vm.vblk_size;
 	perbuf = 512 / size;
@@ -1418,7 +1418,7 @@
 	finish = (size * ldb->vm.last_vblk_seq) >> 9;
 
 	for (s = skip; s < finish; s++) {		/* For each sector */
-		data = read_dev_sector (bdev, base + OFF_VMDB + s, &sect);
+		data = read_part_sector(state, base + OFF_VMDB + s, &sect);
 		if (!data) {
 			ldm_crit ("Disk read failed.");
 			goto out;
@@ -1474,8 +1474,7 @@
 
 /**
  * ldm_partition - Find out whether a device is a dynamic disk and handle it
- * @pp:    List of the partitions parsed so far
- * @bdev:  Device holding the LDM Database
+ * @state: Partition check state including device holding the LDM Database
  *
  * This determines whether the device @bdev is a dynamic disk and if so creates
  * the partitions necessary in the gendisk structure pointed to by @hd.
@@ -1485,21 +1484,21 @@
  * example, if the device is hda, we would have: hda1: LDM database, hda2, hda3,
  * and so on: the actual data containing partitions.
  *
- * Return:  1 Success, @bdev is a dynamic disk and we handled it
- *          0 Success, @bdev is not a dynamic disk
+ * Return:  1 Success, @state->bdev is a dynamic disk and we handled it
+ *          0 Success, @state->bdev is not a dynamic disk
  *         -1 An error occurred before enough information had been read
- *            Or @bdev is a dynamic disk, but it may be corrupted
+ *            Or @state->bdev is a dynamic disk, but it may be corrupted
  */
-int ldm_partition (struct parsed_partitions *pp, struct block_device *bdev)
+int ldm_partition(struct parsed_partitions *state)
 {
 	struct ldmdb  *ldb;
 	unsigned long base;
 	int result = -1;
 
-	BUG_ON (!pp || !bdev);
+	BUG_ON(!state);
 
 	/* Look for signs of a Dynamic Disk */
-	if (!ldm_validate_partition_table (bdev))
+	if (!ldm_validate_partition_table(state))
 		return 0;
 
 	ldb = kmalloc (sizeof (*ldb), GFP_KERNEL);
@@ -1509,15 +1508,15 @@
 	}
 
 	/* Parse and check privheads. */
-	if (!ldm_validate_privheads (bdev, &ldb->ph))
+	if (!ldm_validate_privheads(state, &ldb->ph))
 		goto out;		/* Already logged */
 
 	/* All further references are relative to base (database start). */
 	base = ldb->ph.config_start;
 
 	/* Parse and check tocs and vmdb. */
-	if (!ldm_validate_tocblocks (bdev, base, ldb) ||
-	    !ldm_validate_vmdb      (bdev, base, ldb))
+	if (!ldm_validate_tocblocks(state, base, ldb) ||
+	    !ldm_validate_vmdb(state, base, ldb))
 	    	goto out;		/* Already logged */
 
 	/* Initialize vblk lists in ldmdb struct */
@@ -1527,13 +1526,13 @@
 	INIT_LIST_HEAD (&ldb->v_comp);
 	INIT_LIST_HEAD (&ldb->v_part);
 
-	if (!ldm_get_vblks (bdev, base, ldb)) {
+	if (!ldm_get_vblks(state, base, ldb)) {
 		ldm_crit ("Failed to read the VBLKs from the database.");
 		goto cleanup;
 	}
 
 	/* Finally, create the data partition devices. */
-	if (ldm_create_data_partitions (pp, ldb)) {
+	if (ldm_create_data_partitions(state, ldb)) {
 		ldm_debug ("Parsed LDM database successfully.");
 		result = 1;
 	}
diff --git a/fs/partitions/ldm.h b/fs/partitions/ldm.h
index 30e08e8..d1fb50b 100644
--- a/fs/partitions/ldm.h
+++ b/fs/partitions/ldm.h
@@ -209,7 +209,7 @@
 	struct list_head v_part;
 };
 
-int ldm_partition (struct parsed_partitions *state, struct block_device *bdev);
+int ldm_partition(struct parsed_partitions *state);
 
 #endif /* _FS_PT_LDM_H_ */
 
diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
index d4a0fad..13e27b0 100644
--- a/fs/partitions/mac.c
+++ b/fs/partitions/mac.c
@@ -27,7 +27,7 @@
 		stg[i] = 0;
 }
 
-int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
+int mac_partition(struct parsed_partitions *state)
 {
 	int slot = 1;
 	Sector sect;
@@ -42,7 +42,7 @@
 	struct mac_driver_desc *md;
 
 	/* Get 0th block and look at the first partition map entry. */
-	md = (struct mac_driver_desc *) read_dev_sector(bdev, 0, &sect);
+	md = read_part_sector(state, 0, &sect);
 	if (!md)
 		return -1;
 	if (be16_to_cpu(md->signature) != MAC_DRIVER_MAGIC) {
@@ -51,7 +51,7 @@
 	}
 	secsize = be16_to_cpu(md->block_size);
 	put_dev_sector(sect);
-	data = read_dev_sector(bdev, secsize/512, &sect);
+	data = read_part_sector(state, secsize/512, &sect);
 	if (!data)
 		return -1;
 	part = (struct mac_partition *) (data + secsize%512);
@@ -64,7 +64,7 @@
 	for (blk = 1; blk <= blocks_in_map; ++blk) {
 		int pos = blk * secsize;
 		put_dev_sector(sect);
-		data = read_dev_sector(bdev, pos/512, &sect);
+		data = read_part_sector(state, pos/512, &sect);
 		if (!data)
 			return -1;
 		part = (struct mac_partition *) (data + pos%512);
@@ -123,7 +123,8 @@
 	}
 #ifdef CONFIG_PPC_PMAC
 	if (found_root_goodness)
-		note_bootable_part(bdev->bd_dev, found_root, found_root_goodness);
+		note_bootable_part(state->bdev->bd_dev, found_root,
+				   found_root_goodness);
 #endif
 
 	put_dev_sector(sect);
diff --git a/fs/partitions/mac.h b/fs/partitions/mac.h
index bbf26e1..3c7d984 100644
--- a/fs/partitions/mac.h
+++ b/fs/partitions/mac.h
@@ -41,4 +41,4 @@
     /* ... more stuff */
 };
 
-int mac_partition(struct parsed_partitions *state, struct block_device *bdev);
+int mac_partition(struct parsed_partitions *state);
diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c
index 90be97f..645a68d 100644
--- a/fs/partitions/msdos.c
+++ b/fs/partitions/msdos.c
@@ -64,7 +64,7 @@
 #define AIX_LABEL_MAGIC2	0xC2
 #define AIX_LABEL_MAGIC3	0xD4
 #define AIX_LABEL_MAGIC4	0xC1
-static int aix_magic_present(unsigned char *p, struct block_device *bdev)
+static int aix_magic_present(struct parsed_partitions *state, unsigned char *p)
 {
 	struct partition *pt = (struct partition *) (p + 0x1be);
 	Sector sect;
@@ -85,7 +85,7 @@
 			is_extended_partition(pt))
 			return 0;
 	}
-	d = read_dev_sector(bdev, 7, &sect);
+	d = read_part_sector(state, 7, &sect);
 	if (d) {
 		if (d[0] == '_' && d[1] == 'L' && d[2] == 'V' && d[3] == 'M')
 			ret = 1;
@@ -105,15 +105,14 @@
  * only for the actual data partitions.
  */
 
-static void
-parse_extended(struct parsed_partitions *state, struct block_device *bdev,
-			sector_t first_sector, sector_t first_size)
+static void parse_extended(struct parsed_partitions *state,
+			   sector_t first_sector, sector_t first_size)
 {
 	struct partition *p;
 	Sector sect;
 	unsigned char *data;
 	sector_t this_sector, this_size;
-	sector_t sector_size = bdev_logical_block_size(bdev) / 512;
+	sector_t sector_size = bdev_logical_block_size(state->bdev) / 512;
 	int loopct = 0;		/* number of links followed
 				   without finding a data partition */
 	int i;
@@ -126,7 +125,7 @@
 			return;
 		if (state->next == state->limit)
 			return;
-		data = read_dev_sector(bdev, this_sector, &sect);
+		data = read_part_sector(state, this_sector, &sect);
 		if (!data)
 			return;
 
@@ -198,9 +197,8 @@
 /* james@bpgc.com: Solaris has a nasty indicator: 0x82 which also
    indicates linux swap.  Be careful before believing this is Solaris. */
 
-static void
-parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
-			sector_t offset, sector_t size, int origin)
+static void parse_solaris_x86(struct parsed_partitions *state,
+			      sector_t offset, sector_t size, int origin)
 {
 #ifdef CONFIG_SOLARIS_X86_PARTITION
 	Sector sect;
@@ -208,7 +206,7 @@
 	int i;
 	short max_nparts;
 
-	v = (struct solaris_x86_vtoc *)read_dev_sector(bdev, offset+1, &sect);
+	v = read_part_sector(state, offset + 1, &sect);
 	if (!v)
 		return;
 	if (le32_to_cpu(v->v_sanity) != SOLARIS_X86_VTOC_SANE) {
@@ -245,16 +243,15 @@
  * Create devices for BSD partitions listed in a disklabel, under a
  * dos-like partition. See parse_extended() for more information.
  */
-static void
-parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
-		sector_t offset, sector_t size, int origin, char *flavour,
-		int max_partitions)
+static void parse_bsd(struct parsed_partitions *state,
+		      sector_t offset, sector_t size, int origin, char *flavour,
+		      int max_partitions)
 {
 	Sector sect;
 	struct bsd_disklabel *l;
 	struct bsd_partition *p;
 
-	l = (struct bsd_disklabel *)read_dev_sector(bdev, offset+1, &sect);
+	l = read_part_sector(state, offset + 1, &sect);
 	if (!l)
 		return;
 	if (le32_to_cpu(l->d_magic) != BSD_DISKMAGIC) {
@@ -291,33 +288,28 @@
 }
 #endif
 
-static void
-parse_freebsd(struct parsed_partitions *state, struct block_device *bdev,
-		sector_t offset, sector_t size, int origin)
+static void parse_freebsd(struct parsed_partitions *state,
+			  sector_t offset, sector_t size, int origin)
 {
 #ifdef CONFIG_BSD_DISKLABEL
-	parse_bsd(state, bdev, offset, size, origin,
-			"bsd", BSD_MAXPARTITIONS);
+	parse_bsd(state, offset, size, origin, "bsd", BSD_MAXPARTITIONS);
 #endif
 }
 
-static void
-parse_netbsd(struct parsed_partitions *state, struct block_device *bdev,
-		sector_t offset, sector_t size, int origin)
+static void parse_netbsd(struct parsed_partitions *state,
+			 sector_t offset, sector_t size, int origin)
 {
 #ifdef CONFIG_BSD_DISKLABEL
-	parse_bsd(state, bdev, offset, size, origin,
-			"netbsd", BSD_MAXPARTITIONS);
+	parse_bsd(state, offset, size, origin, "netbsd", BSD_MAXPARTITIONS);
 #endif
 }
 
-static void
-parse_openbsd(struct parsed_partitions *state, struct block_device *bdev,
-		sector_t offset, sector_t size, int origin)
+static void parse_openbsd(struct parsed_partitions *state,
+			  sector_t offset, sector_t size, int origin)
 {
 #ifdef CONFIG_BSD_DISKLABEL
-	parse_bsd(state, bdev, offset, size, origin,
-			"openbsd", OPENBSD_MAXPARTITIONS);
+	parse_bsd(state, offset, size, origin, "openbsd",
+		  OPENBSD_MAXPARTITIONS);
 #endif
 }
 
@@ -325,16 +317,15 @@
  * Create devices for Unixware partitions listed in a disklabel, under a
  * dos-like partition. See parse_extended() for more information.
  */
-static void
-parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
-		sector_t offset, sector_t size, int origin)
+static void parse_unixware(struct parsed_partitions *state,
+			   sector_t offset, sector_t size, int origin)
 {
 #ifdef CONFIG_UNIXWARE_DISKLABEL
 	Sector sect;
 	struct unixware_disklabel *l;
 	struct unixware_slice *p;
 
-	l = (struct unixware_disklabel *)read_dev_sector(bdev, offset+29, &sect);
+	l = read_part_sector(state, offset + 29, &sect);
 	if (!l)
 		return;
 	if (le32_to_cpu(l->d_magic) != UNIXWARE_DISKMAGIC ||
@@ -365,9 +356,8 @@
  * Anand Krishnamurthy <anandk@wiproge.med.ge.com>
  * Rajeev V. Pillai    <rajeevvp@yahoo.com>
  */
-static void
-parse_minix(struct parsed_partitions *state, struct block_device *bdev,
-		sector_t offset, sector_t size, int origin)
+static void parse_minix(struct parsed_partitions *state,
+			sector_t offset, sector_t size, int origin)
 {
 #ifdef CONFIG_MINIX_SUBPARTITION
 	Sector sect;
@@ -375,7 +365,7 @@
 	struct partition *p;
 	int i;
 
-	data = read_dev_sector(bdev, offset, &sect);
+	data = read_part_sector(state, offset, &sect);
 	if (!data)
 		return;
 
@@ -404,8 +394,7 @@
 
 static struct {
 	unsigned char id;
-	void (*parse)(struct parsed_partitions *, struct block_device *,
-			sector_t, sector_t, int);
+	void (*parse)(struct parsed_partitions *, sector_t, sector_t, int);
 } subtypes[] = {
 	{FREEBSD_PARTITION, parse_freebsd},
 	{NETBSD_PARTITION, parse_netbsd},
@@ -417,16 +406,16 @@
 	{0, NULL},
 };
  
-int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
+int msdos_partition(struct parsed_partitions *state)
 {
-	sector_t sector_size = bdev_logical_block_size(bdev) / 512;
+	sector_t sector_size = bdev_logical_block_size(state->bdev) / 512;
 	Sector sect;
 	unsigned char *data;
 	struct partition *p;
 	struct fat_boot_sector *fb;
 	int slot;
 
-	data = read_dev_sector(bdev, 0, &sect);
+	data = read_part_sector(state, 0, &sect);
 	if (!data)
 		return -1;
 	if (!msdos_magic_present(data + 510)) {
@@ -434,7 +423,7 @@
 		return 0;
 	}
 
-	if (aix_magic_present(data, bdev)) {
+	if (aix_magic_present(state, data)) {
 		put_dev_sector(sect);
 		printk( " [AIX]");
 		return 0;
@@ -503,7 +492,7 @@
 			put_partition(state, slot, start, n);
 
 			printk(" <");
-			parse_extended(state, bdev, start, size);
+			parse_extended(state, start, size);
 			printk(" >");
 			continue;
 		}
@@ -532,8 +521,8 @@
 
 		if (!subtypes[n].parse)
 			continue;
-		subtypes[n].parse(state, bdev, start_sect(p)*sector_size,
-						nr_sects(p)*sector_size, slot);
+		subtypes[n].parse(state, start_sect(p) * sector_size,
+				  nr_sects(p) * sector_size, slot);
 	}
 	put_dev_sector(sect);
 	return 1;
diff --git a/fs/partitions/msdos.h b/fs/partitions/msdos.h
index 01e5e0b..38c781c 100644
--- a/fs/partitions/msdos.h
+++ b/fs/partitions/msdos.h
@@ -4,5 +4,5 @@
 
 #define MSDOS_LABEL_MAGIC		0xAA55
 
-int msdos_partition(struct parsed_partitions *state, struct block_device *bdev);
+int msdos_partition(struct parsed_partitions *state);
 
diff --git a/fs/partitions/osf.c b/fs/partitions/osf.c
index c05c17bc..fc22b85 100644
--- a/fs/partitions/osf.c
+++ b/fs/partitions/osf.c
@@ -10,7 +10,7 @@
 #include "check.h"
 #include "osf.h"
 
-int osf_partition(struct parsed_partitions *state, struct block_device *bdev)
+int osf_partition(struct parsed_partitions *state)
 {
 	int i;
 	int slot = 1;
@@ -49,7 +49,7 @@
 	} * label;
 	struct d_partition * partition;
 
-	data = read_dev_sector(bdev, 0, &sect);
+	data = read_part_sector(state, 0, &sect);
 	if (!data)
 		return -1;
 
diff --git a/fs/partitions/osf.h b/fs/partitions/osf.h
index 427b8ea..20ed231 100644
--- a/fs/partitions/osf.h
+++ b/fs/partitions/osf.h
@@ -4,4 +4,4 @@
 
 #define DISKLABELMAGIC (0x82564557UL)
 
-int osf_partition(struct parsed_partitions *state, struct block_device *bdev);
+int osf_partition(struct parsed_partitions *state);
diff --git a/fs/partitions/sgi.c b/fs/partitions/sgi.c
index ed5ac83..43b1df9 100644
--- a/fs/partitions/sgi.c
+++ b/fs/partitions/sgi.c
@@ -27,7 +27,7 @@
 	__be32 _unused1;			/* Padding */
 };
 
-int sgi_partition(struct parsed_partitions *state, struct block_device *bdev)
+int sgi_partition(struct parsed_partitions *state)
 {
 	int i, csum;
 	__be32 magic;
@@ -39,7 +39,7 @@
 	struct sgi_partition *p;
 	char b[BDEVNAME_SIZE];
 
-	label = (struct sgi_disklabel *) read_dev_sector(bdev, 0, &sect);
+	label = read_part_sector(state, 0, &sect);
 	if (!label)
 		return -1;
 	p = &label->partitions[0];
@@ -57,7 +57,7 @@
 	}
 	if(csum) {
 		printk(KERN_WARNING "Dev %s SGI disklabel: csum bad, label corrupted\n",
-		       bdevname(bdev, b));
+		       bdevname(state->bdev, b));
 		put_dev_sector(sect);
 		return 0;
 	}
diff --git a/fs/partitions/sgi.h b/fs/partitions/sgi.h
index 5d5595c..b9553eb 100644
--- a/fs/partitions/sgi.h
+++ b/fs/partitions/sgi.h
@@ -2,7 +2,7 @@
  *  fs/partitions/sgi.h
  */
 
-extern int sgi_partition(struct parsed_partitions *state, struct block_device *bdev);
+extern int sgi_partition(struct parsed_partitions *state);
 
 #define SGI_LABEL_MAGIC 0x0be5a941
 
diff --git a/fs/partitions/sun.c b/fs/partitions/sun.c
index c95e6a6..a32660e 100644
--- a/fs/partitions/sun.c
+++ b/fs/partitions/sun.c
@@ -10,7 +10,7 @@
 #include "check.h"
 #include "sun.h"
 
-int sun_partition(struct parsed_partitions *state, struct block_device *bdev)
+int sun_partition(struct parsed_partitions *state)
 {
 	int i;
 	__be16 csum;
@@ -61,7 +61,7 @@
 	int use_vtoc;
 	int nparts;
 
-	label = (struct sun_disklabel *)read_dev_sector(bdev, 0, &sect);
+	label = read_part_sector(state, 0, &sect);
 	if (!label)
 		return -1;
 
@@ -78,7 +78,7 @@
 		csum ^= *ush--;
 	if (csum) {
 		printk("Dev %s Sun disklabel: Csum bad, label corrupted\n",
-		       bdevname(bdev, b));
+		       bdevname(state->bdev, b));
 		put_dev_sector(sect);
 		return 0;
 	}
diff --git a/fs/partitions/sun.h b/fs/partitions/sun.h
index 7f864d1..2424baa 100644
--- a/fs/partitions/sun.h
+++ b/fs/partitions/sun.h
@@ -5,4 +5,4 @@
 #define SUN_LABEL_MAGIC          0xDABE
 #define SUN_VTOC_SANITY          0x600DDEEE
 
-int sun_partition(struct parsed_partitions *state, struct block_device *bdev);
+int sun_partition(struct parsed_partitions *state);
diff --git a/fs/partitions/sysv68.c b/fs/partitions/sysv68.c
index 4eba27b..9030c86 100644
--- a/fs/partitions/sysv68.c
+++ b/fs/partitions/sysv68.c
@@ -46,7 +46,7 @@
 };
 
 
-int sysv68_partition(struct parsed_partitions *state, struct block_device *bdev)
+int sysv68_partition(struct parsed_partitions *state)
 {
 	int i, slices;
 	int slot = 1;
@@ -55,7 +55,7 @@
 	struct dkblk0 *b;
 	struct slice *slice;
 
-	data = read_dev_sector(bdev, 0, &sect);
+	data = read_part_sector(state, 0, &sect);
 	if (!data)
 		return -1;
 
@@ -68,7 +68,7 @@
 	i = be32_to_cpu(b->dk_ios.ios_slcblk);
 	put_dev_sector(sect);
 
-	data = read_dev_sector(bdev, i, &sect);
+	data = read_part_sector(state, i, &sect);
 	if (!data)
 		return -1;
 
diff --git a/fs/partitions/sysv68.h b/fs/partitions/sysv68.h
index fa733f6..bf2f5ff 100644
--- a/fs/partitions/sysv68.h
+++ b/fs/partitions/sysv68.h
@@ -1 +1 @@
-extern int sysv68_partition(struct parsed_partitions *state, struct block_device *bdev);
+extern int sysv68_partition(struct parsed_partitions *state);
diff --git a/fs/partitions/ultrix.c b/fs/partitions/ultrix.c
index ec852c1..db9eef2 100644
--- a/fs/partitions/ultrix.c
+++ b/fs/partitions/ultrix.c
@@ -9,7 +9,7 @@
 #include "check.h"
 #include "ultrix.h"
 
-int ultrix_partition(struct parsed_partitions *state, struct block_device *bdev)
+int ultrix_partition(struct parsed_partitions *state)
 {
 	int i;
 	Sector sect;
@@ -26,7 +26,7 @@
 #define PT_MAGIC	0x032957	/* Partition magic number */
 #define PT_VALID	1		/* Indicates if struct is valid */
 
-	data = read_dev_sector(bdev, (16384 - sizeof(*label))/512, &sect);
+	data = read_part_sector(state, (16384 - sizeof(*label))/512, &sect);
 	if (!data)
 		return -1;
 	
diff --git a/fs/partitions/ultrix.h b/fs/partitions/ultrix.h
index a74bf8e..a3cc00b 100644
--- a/fs/partitions/ultrix.h
+++ b/fs/partitions/ultrix.h
@@ -2,4 +2,4 @@
  *  fs/partitions/ultrix.h
  */
 
-int ultrix_partition(struct parsed_partitions *state, struct block_device *bdev);
+int ultrix_partition(struct parsed_partitions *state);
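The conversion above is uniform across the partition parsers: the former (state, bdev) parameter pair collapses into the single parsed_partitions state, the block device is reached as state->bdev where it is still needed (bdevname(), note_bootable_part(), bdev_logical_block_size()), and every sector read goes through read_part_sector() instead of read_dev_sector(). The helper itself is introduced in fs/partitions/check.h, outside this hunk; a minimal sketch of what it presumably looks like, judging from how the callers use it, is:

	/* Hypothetical sketch only; the real helper lives in fs/partitions/check.h
	 * and may also bounds-check the sector against the disk capacity. */
	static inline void *read_part_sector(struct parsed_partitions *state,
					     sector_t n, Sector *p)
	{
		return read_dev_sector(state->bdev, n, p);
	}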
diff --git a/fs/pipe.c b/fs/pipe.c
index 37ba29f..d79872e 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/fs.h>
+#include <linux/log2.h>
 #include <linux/mount.h>
 #include <linux/pipe_fs_i.h>
 #include <linux/uio.h>
@@ -18,11 +19,18 @@
 #include <linux/pagemap.h>
 #include <linux/audit.h>
 #include <linux/syscalls.h>
+#include <linux/fcntl.h>
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
 
 /*
+ * The max size to which a non-root user is allowed to grow the pipe.
+ * Can be set by root in /proc/sys/fs/pipe-max-pages.
+ */
+unsigned int pipe_max_pages = PIPE_DEF_BUFFERS * 16;
+
+/*
  * We use a start+len construction, which provides full use of the 
  * allocated memory.
  * -- Florian Coosmann (FGC)
@@ -390,7 +398,7 @@
 			if (!buf->len) {
 				buf->ops = NULL;
 				ops->release(pipe, buf);
-				curbuf = (curbuf + 1) & (PIPE_BUFFERS-1);
+				curbuf = (curbuf + 1) & (pipe->buffers - 1);
 				pipe->curbuf = curbuf;
 				pipe->nrbufs = --bufs;
 				do_wakeup = 1;
@@ -472,7 +480,7 @@
 	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
 	if (pipe->nrbufs && chars != 0) {
 		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
-							(PIPE_BUFFERS-1);
+							(pipe->buffers - 1);
 		struct pipe_buffer *buf = pipe->bufs + lastbuf;
 		const struct pipe_buf_operations *ops = buf->ops;
 		int offset = buf->offset + buf->len;
@@ -518,8 +526,8 @@
 			break;
 		}
 		bufs = pipe->nrbufs;
-		if (bufs < PIPE_BUFFERS) {
-			int newbuf = (pipe->curbuf + bufs) & (PIPE_BUFFERS-1);
+		if (bufs < pipe->buffers) {
+			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
 			struct pipe_buffer *buf = pipe->bufs + newbuf;
 			struct page *page = pipe->tmp_page;
 			char *src;
@@ -580,7 +588,7 @@
 			if (!total_len)
 				break;
 		}
-		if (bufs < PIPE_BUFFERS)
+		if (bufs < pipe->buffers)
 			continue;
 		if (filp->f_flags & O_NONBLOCK) {
 			if (!ret)
@@ -640,7 +648,7 @@
 			nrbufs = pipe->nrbufs;
 			while (--nrbufs >= 0) {
 				count += pipe->bufs[buf].len;
-				buf = (buf+1) & (PIPE_BUFFERS-1);
+				buf = (buf+1) & (pipe->buffers - 1);
 			}
 			mutex_unlock(&inode->i_mutex);
 
@@ -671,7 +679,7 @@
 	}
 
 	if (filp->f_mode & FMODE_WRITE) {
-		mask |= (nrbufs < PIPE_BUFFERS) ? POLLOUT | POLLWRNORM : 0;
+		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
 		/*
 		 * Most Unices do not set POLLERR for FIFOs but on Linux they
 		 * behave exactly like pipes for poll().
@@ -877,25 +885,32 @@
 
 	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
 	if (pipe) {
-		init_waitqueue_head(&pipe->wait);
-		pipe->r_counter = pipe->w_counter = 1;
-		pipe->inode = inode;
+		pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
+		if (pipe->bufs) {
+			init_waitqueue_head(&pipe->wait);
+			pipe->r_counter = pipe->w_counter = 1;
+			pipe->inode = inode;
+			pipe->buffers = PIPE_DEF_BUFFERS;
+			return pipe;
+		}
+		kfree(pipe);
 	}
 
-	return pipe;
+	return NULL;
 }
 
 void __free_pipe_info(struct pipe_inode_info *pipe)
 {
 	int i;
 
-	for (i = 0; i < PIPE_BUFFERS; i++) {
+	for (i = 0; i < pipe->buffers; i++) {
 		struct pipe_buffer *buf = pipe->bufs + i;
 		if (buf->ops)
 			buf->ops->release(pipe, buf);
 	}
 	if (pipe->tmp_page)
 		__free_page(pipe->tmp_page);
+	kfree(pipe->bufs);
 	kfree(pipe);
 }
 
@@ -1094,6 +1109,89 @@
 }
 
 /*
+ * Allocate a new array of pipe buffers and copy the info over. Returns the
+ * pipe size if successful, or a negative error code on failure.
+ */
+static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
+{
+	struct pipe_buffer *bufs;
+
+	/*
+	 * Must be a power-of-2 currently
+	 */
+	if (!is_power_of_2(arg))
+		return -EINVAL;
+
+	/*
+	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
+	 * expect a lot of shrink+grow operations, just free and allocate
+	 * again like we would do for growing. If the pipe currently
+	 * contains more buffers than arg, then return busy.
+	 */
+	if (arg < pipe->nrbufs)
+		return -EBUSY;
+
+	bufs = kcalloc(arg, sizeof(struct pipe_buffer), GFP_KERNEL);
+	if (unlikely(!bufs))
+		return -ENOMEM;
+
+	/*
+	 * The pipe array wraps around, so just start the new one at zero
+	 * and adjust the indexes. The occupied region may itself wrap, so
+	 * copy the part from curbuf to the end of the old array first and
+	 * the wrapped remainder (which starts at slot zero) after it.
+	 */
+	if (pipe->nrbufs) {
+		unsigned int tail = pipe->curbuf + pipe->nrbufs;
+		unsigned int head;
+
+		if (tail <= pipe->buffers)
+			tail = 0;
+		else
+			tail &= (pipe->buffers - 1);
+		head = pipe->nrbufs - tail;
+
+		if (head)
+			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
+		if (tail)
+			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
+	}
+
+	pipe->curbuf = 0;
+	kfree(pipe->bufs);
+	pipe->bufs = bufs;
+	pipe->buffers = arg;
+	return arg;
+}
+
+long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct pipe_inode_info *pipe;
+	long ret;
+
+	pipe = file->f_path.dentry->d_inode->i_pipe;
+	if (!pipe)
+		return -EBADF;
+
+	mutex_lock(&pipe->inode->i_mutex);
+
+	switch (cmd) {
+	case F_SETPIPE_SZ:
+		/* Don't return with pipe->inode->i_mutex still held. */
+		if (!capable(CAP_SYS_ADMIN) && arg > pipe_max_pages) {
+			ret = -EINVAL;
+			break;
+		}
+		/*
+		 * The pipe needs to be at least 2 pages large to
+		 * guarantee POSIX behaviour.
+		 */
+		if (arg < 2) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = pipe_set_size(pipe, arg);
+		break;
+	case F_GETPIPE_SZ:
+		ret = pipe->buffers;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&pipe->inode->i_mutex);
+	return ret;
+}
+
+/*
  * pipefs should _never_ be mounted by userland - too much of security hassle,
  * no real gain from having the whole whorehouse mounted. So we don't need
  * any operations on the root directory. However, we need a non-trivial
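The fs/pipe.c changes above replace the compile-time PIPE_BUFFERS ring size with a per-pipe pipe->buffers count (defaulting to PIPE_DEF_BUFFERS) and expose it through the new F_SETPIPE_SZ/F_GETPIPE_SZ fcntl commands handled by pipe_fcntl(). The ring indexing keeps using a mask, which is why pipe_set_size() insists on a power-of-two size. A hedged userspace sketch of the new interface follows; it assumes the F_SETPIPE_SZ/F_GETPIPE_SZ constants are visible (recent glibc or <linux/fcntl.h>), and note that in this version of the patch the argument counts pipe buffers (pages), not bytes:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fds[2];

		if (pipe(fds))
			return 1;

		/* Grow from the default PIPE_DEF_BUFFERS to 64 buffers; the value
		 * must be a power of two and at least 2, and non-root callers are
		 * capped by /proc/sys/fs/pipe-max-pages. */
		if (fcntl(fds[1], F_SETPIPE_SZ, 64) < 0)
			perror("F_SETPIPE_SZ");

		printf("pipe now holds %d buffers\n", fcntl(fds[1], F_GETPIPE_SZ));
		return 0;
	}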
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 788b580..655a4c5 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -82,7 +82,7 @@
 
 /*
  * There are three quota SMP locks. dq_list_lock protects all lists with quotas
- * and quota formats, dqstats structure containing statistics about the lists
+ * and quota formats.
  * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
  * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
  * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
@@ -132,7 +132,9 @@
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
 EXPORT_SYMBOL(dq_data_lock);
 
+#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
 static char *quotatypes[] = INITQFNAMES;
+#endif
 static struct quota_format_type *quota_formats;	/* List of registered formats */
 static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
 
@@ -226,6 +228,10 @@
 
 struct dqstats dqstats;
 EXPORT_SYMBOL(dqstats);
+#ifdef CONFIG_SMP
+struct dqstats *dqstats_pcpu;
+EXPORT_SYMBOL(dqstats_pcpu);
+#endif
 
 static qsize_t inode_get_rsv_space(struct inode *inode);
 static void __dquot_initialize(struct inode *inode, int type);
@@ -273,7 +279,7 @@
 static inline void put_dquot_last(struct dquot *dquot)
 {
 	list_add_tail(&dquot->dq_free, &free_dquots);
-	dqstats.free_dquots++;
+	dqstats_inc(DQST_FREE_DQUOTS);
 }
 
 static inline void remove_free_dquot(struct dquot *dquot)
@@ -281,7 +287,7 @@
 	if (list_empty(&dquot->dq_free))
 		return;
 	list_del_init(&dquot->dq_free);
-	dqstats.free_dquots--;
+	dqstats_dec(DQST_FREE_DQUOTS);
 }
 
 static inline void put_inuse(struct dquot *dquot)
@@ -289,12 +295,12 @@
 	/* We add to the back of inuse list so we don't have to restart
 	 * when traversing this list and we block */
 	list_add_tail(&dquot->dq_inuse, &inuse_list);
-	dqstats.allocated_dquots++;
+	dqstats_inc(DQST_ALLOC_DQUOTS);
 }
 
 static inline void remove_inuse(struct dquot *dquot)
 {
-	dqstats.allocated_dquots--;
+	dqstats_dec(DQST_ALLOC_DQUOTS);
 	list_del(&dquot->dq_inuse);
 }
 /*
@@ -317,14 +323,23 @@
 	return dquot->dq_sb->dq_op->mark_dirty(dquot);
 }
 
+/* Mark dquot dirty in an atomic manner, and return its old dirty flag state */
 int dquot_mark_dquot_dirty(struct dquot *dquot)
 {
+	int ret = 1;
+
+	/* If quota is dirty already, we don't have to acquire dq_list_lock */
+	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
+		return 1;
+
 	spin_lock(&dq_list_lock);
-	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags))
+	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
 		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
 				info[dquot->dq_type].dqi_dirty_list);
+		ret = 0;
+	}
 	spin_unlock(&dq_list_lock);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(dquot_mark_dquot_dirty);
 
@@ -550,8 +565,8 @@
 			continue;
 		/* Now we have active dquot so we can just increase use count */
 		atomic_inc(&dquot->dq_count);
-		dqstats.lookups++;
 		spin_unlock(&dq_list_lock);
+		dqstats_inc(DQST_LOOKUPS);
 		dqput(old_dquot);
 		old_dquot = dquot;
 		ret = fn(dquot, priv);
@@ -596,8 +611,8 @@
  			 * holding reference so we can safely just increase
 			 * use count */
 			atomic_inc(&dquot->dq_count);
-			dqstats.lookups++;
 			spin_unlock(&dq_list_lock);
+			dqstats_inc(DQST_LOOKUPS);
 			sb->dq_op->write_dquot(dquot);
 			dqput(dquot);
 			spin_lock(&dq_list_lock);
@@ -609,9 +624,7 @@
 		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
 		    && info_dirty(&dqopt->info[cnt]))
 			sb->dq_op->write_info(sb, cnt);
-	spin_lock(&dq_list_lock);
-	dqstats.syncs++;
-	spin_unlock(&dq_list_lock);
+	dqstats_inc(DQST_SYNCS);
 	mutex_unlock(&dqopt->dqonoff_mutex);
 
 	if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE))
@@ -663,6 +676,22 @@
 	}
 }
 
+static int dqstats_read(unsigned int type)
+{
+	int count = 0;
+#ifdef CONFIG_SMP
+	int cpu;
+	for_each_possible_cpu(cpu)
+		count += per_cpu_ptr(dqstats_pcpu, cpu)->stat[type];
+	/* Statistics reading is racy, but absolute accuracy isn't required */
+	if (count < 0)
+		count = 0;
+#else
+	count = dqstats.stat[type];
+#endif
+	return count;
+}
+
 /*
  * This is called from kswapd when we think we need some
  * more memory
@@ -675,7 +704,7 @@
 		prune_dqcache(nr);
 		spin_unlock(&dq_list_lock);
 	}
-	return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
+	return (dqstats_read(DQST_FREE_DQUOTS)/100) * sysctl_vfs_cache_pressure;
 }
 
 static struct shrinker dqcache_shrinker = {
@@ -703,10 +732,7 @@
 		BUG();
 	}
 #endif
-	
-	spin_lock(&dq_list_lock);
-	dqstats.drops++;
-	spin_unlock(&dq_list_lock);
+	dqstats_inc(DQST_DROPS);
 we_slept:
 	spin_lock(&dq_list_lock);
 	if (atomic_read(&dquot->dq_count) > 1) {
@@ -823,15 +849,15 @@
 		put_inuse(dquot);
 		/* hash it first so it can be found */
 		insert_dquot_hash(dquot);
-		dqstats.lookups++;
 		spin_unlock(&dq_list_lock);
+		dqstats_inc(DQST_LOOKUPS);
 	} else {
 		if (!atomic_read(&dquot->dq_count))
 			remove_free_dquot(dquot);
 		atomic_inc(&dquot->dq_count);
-		dqstats.cache_hits++;
-		dqstats.lookups++;
 		spin_unlock(&dq_list_lock);
+		dqstats_inc(DQST_CACHE_HITS);
+		dqstats_inc(DQST_LOOKUPS);
 	}
 	/* Wait for dq_lock - after this we know that either dquot_release() is
 	 * already finished or it will be canceled due to dq_count > 1 test */
@@ -1677,16 +1703,19 @@
 
 /*
  * Transfer the number of inode and blocks from one diskquota to an other.
+ * On success, dquot references in transfer_to are consumed and references
+ * to original dquots that need to be released are placed there. On failure,
+ * references are kept untouched.
  *
  * This operation can block, but only after everything is updated
  * A transaction must be started when entering this function.
+ *
  */
-static int __dquot_transfer(struct inode *inode, qid_t *chid, unsigned long mask)
+int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 {
 	qsize_t space, cur_space;
 	qsize_t rsv_space = 0;
-	struct dquot *transfer_from[MAXQUOTAS];
-	struct dquot *transfer_to[MAXQUOTAS];
+	struct dquot *transfer_from[MAXQUOTAS] = {};
 	int cnt, ret = 0;
 	char warntype_to[MAXQUOTAS];
 	char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
@@ -1696,19 +1725,12 @@
 	if (IS_NOQUOTA(inode))
 		return 0;
 	/* Initialize the arrays */
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		transfer_from[cnt] = NULL;
-		transfer_to[cnt] = NULL;
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 		warntype_to[cnt] = QUOTA_NL_NOWARN;
-	}
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (mask & (1 << cnt))
-			transfer_to[cnt] = dqget(inode->i_sb, chid[cnt], cnt);
-	}
 	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
 		up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-		goto put_all;
+		return 0;
 	}
 	spin_lock(&dq_data_lock);
 	cur_space = inode_get_bytes(inode);
@@ -1760,47 +1782,41 @@
 
 	mark_all_dquot_dirty(transfer_from);
 	mark_all_dquot_dirty(transfer_to);
-	/* The reference we got is transferred to the inode */
+	/* Pass back references to put */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		transfer_to[cnt] = NULL;
-warn_put_all:
+		transfer_to[cnt] = transfer_from[cnt];
+warn:
 	flush_warnings(transfer_to, warntype_to);
 	flush_warnings(transfer_from, warntype_from_inodes);
 	flush_warnings(transfer_from, warntype_from_space);
-put_all:
-	dqput_all(transfer_from);
-	dqput_all(transfer_to);
 	return ret;
 over_quota:
 	spin_unlock(&dq_data_lock);
 	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-	/* Clear dquot pointers we don't want to dqput() */
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		transfer_from[cnt] = NULL;
-	goto warn_put_all;
+	goto warn;
 }
+EXPORT_SYMBOL(__dquot_transfer);
 
 /* Wrapper for transferring ownership of an inode for uid/gid only
  * Called from FSXXX_setattr()
  */
 int dquot_transfer(struct inode *inode, struct iattr *iattr)
 {
-	qid_t chid[MAXQUOTAS];
-	unsigned long mask = 0;
+	struct dquot *transfer_to[MAXQUOTAS] = {};
+	struct super_block *sb = inode->i_sb;
+	int ret;
 
-	if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) {
-		mask |= 1 << USRQUOTA;
-		chid[USRQUOTA] = iattr->ia_uid;
-	}
-	if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) {
-		mask |= 1 << GRPQUOTA;
-		chid[GRPQUOTA] = iattr->ia_gid;
-	}
-	if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) {
-		dquot_initialize(inode);
-		return __dquot_transfer(inode, chid, mask);
-	}
-	return 0;
+	if (!sb_any_quota_active(sb) || IS_NOQUOTA(inode))
+		return 0;
+
+	if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid)
+		transfer_to[USRQUOTA] = dqget(sb, iattr->ia_uid, USRQUOTA);
+	if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)
+		transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_uid, GRPQUOTA);
+
+	ret = __dquot_transfer(inode, transfer_to);
+	dqput_all(transfer_to);
+	return ret;
 }
 EXPORT_SYMBOL(dquot_transfer);
 
@@ -2275,25 +2291,30 @@
 }
 
 /* Generic routine for getting common part of quota structure */
-static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
+static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 {
 	struct mem_dqblk *dm = &dquot->dq_dqb;
 
+	memset(di, 0, sizeof(*di));
+	di->d_version = FS_DQUOT_VERSION;
+	di->d_flags = dquot->dq_type == USRQUOTA ?
+			XFS_USER_QUOTA : XFS_GROUP_QUOTA;
+	di->d_id = dquot->dq_id;
+
 	spin_lock(&dq_data_lock);
-	di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit);
-	di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit);
-	di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace;
-	di->dqb_ihardlimit = dm->dqb_ihardlimit;
-	di->dqb_isoftlimit = dm->dqb_isoftlimit;
-	di->dqb_curinodes = dm->dqb_curinodes;
-	di->dqb_btime = dm->dqb_btime;
-	di->dqb_itime = dm->dqb_itime;
-	di->dqb_valid = QIF_ALL;
+	di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
+	di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
+	di->d_ino_hardlimit = dm->dqb_ihardlimit;
+	di->d_ino_softlimit = dm->dqb_isoftlimit;
+	di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
+	di->d_icount = dm->dqb_curinodes;
+	di->d_btimer = dm->dqb_btime;
+	di->d_itimer = dm->dqb_itime;
 	spin_unlock(&dq_data_lock);
 }
 
 int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
-		  struct if_dqblk *di)
+		  struct fs_disk_quota *di)
 {
 	struct dquot *dquot;
 
@@ -2307,51 +2328,70 @@
 }
 EXPORT_SYMBOL(vfs_get_dqblk);
 
+#define VFS_FS_DQ_MASK \
+	(FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
+	 FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
+	 FS_DQ_BTIMER | FS_DQ_ITIMER)
+
 /* Generic routine for setting common part of quota structure */
-static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
+static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 {
 	struct mem_dqblk *dm = &dquot->dq_dqb;
 	int check_blim = 0, check_ilim = 0;
 	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
 
-	if ((di->dqb_valid & QIF_BLIMITS &&
-	     (di->dqb_bhardlimit > dqi->dqi_maxblimit ||
-	      di->dqb_bsoftlimit > dqi->dqi_maxblimit)) ||
-	    (di->dqb_valid & QIF_ILIMITS &&
-	     (di->dqb_ihardlimit > dqi->dqi_maxilimit ||
-	      di->dqb_isoftlimit > dqi->dqi_maxilimit)))
+	if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
+		return -EINVAL;
+
+	if (((di->d_fieldmask & FS_DQ_BSOFT) &&
+	     (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
+	    ((di->d_fieldmask & FS_DQ_BHARD) &&
+	     (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
+	    ((di->d_fieldmask & FS_DQ_ISOFT) &&
+	     (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
+	    ((di->d_fieldmask & FS_DQ_IHARD) &&
+	     (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
 		return -ERANGE;
 
 	spin_lock(&dq_data_lock);
-	if (di->dqb_valid & QIF_SPACE) {
-		dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
+	if (di->d_fieldmask & FS_DQ_BCOUNT) {
+		dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
 		check_blim = 1;
 		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
 	}
-	if (di->dqb_valid & QIF_BLIMITS) {
-		dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
-		dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
+
+	if (di->d_fieldmask & FS_DQ_BSOFT)
+		dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
+	if (di->d_fieldmask & FS_DQ_BHARD)
+		dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
+	if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
 		check_blim = 1;
 		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
 	}
-	if (di->dqb_valid & QIF_INODES) {
-		dm->dqb_curinodes = di->dqb_curinodes;
+
+	if (di->d_fieldmask & FS_DQ_ICOUNT) {
+		dm->dqb_curinodes = di->d_icount;
 		check_ilim = 1;
 		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
 	}
-	if (di->dqb_valid & QIF_ILIMITS) {
-		dm->dqb_isoftlimit = di->dqb_isoftlimit;
-		dm->dqb_ihardlimit = di->dqb_ihardlimit;
+
+	if (di->d_fieldmask & FS_DQ_ISOFT)
+		dm->dqb_isoftlimit = di->d_ino_softlimit;
+	if (di->d_fieldmask & FS_DQ_IHARD)
+		dm->dqb_ihardlimit = di->d_ino_hardlimit;
+	if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
 		check_ilim = 1;
 		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
 	}
-	if (di->dqb_valid & QIF_BTIME) {
-		dm->dqb_btime = di->dqb_btime;
+
+	if (di->d_fieldmask & FS_DQ_BTIMER) {
+		dm->dqb_btime = di->d_btimer;
 		check_blim = 1;
 		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
 	}
-	if (di->dqb_valid & QIF_ITIME) {
-		dm->dqb_itime = di->dqb_itime;
+
+	if (di->d_fieldmask & FS_DQ_ITIMER) {
+		dm->dqb_itime = di->d_itimer;
 		check_ilim = 1;
 		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
 	}
@@ -2361,7 +2401,7 @@
 		    dm->dqb_curspace < dm->dqb_bsoftlimit) {
 			dm->dqb_btime = 0;
 			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
-		} else if (!(di->dqb_valid & QIF_BTIME))
+		} else if (!(di->d_fieldmask & FS_DQ_BTIMER))
 			/* Set grace only if user hasn't provided his own... */
 			dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
 	}
@@ -2370,7 +2410,7 @@
 		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
 			dm->dqb_itime = 0;
 			clear_bit(DQ_INODES_B, &dquot->dq_flags);
-		} else if (!(di->dqb_valid & QIF_ITIME))
+		} else if (!(di->d_fieldmask & FS_DQ_ITIMER))
 			/* Set grace only if user hasn't provided his own... */
 			dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
 	}
@@ -2386,7 +2426,7 @@
 }
 
 int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
-		  struct if_dqblk *di)
+		  struct fs_disk_quota *di)
 {
 	struct dquot *dquot;
 	int rc;
@@ -2465,62 +2505,74 @@
 	.set_dqblk	= vfs_set_dqblk
 };
 
+
+static int do_proc_dqstats(struct ctl_table *table, int write,
+		     void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+#ifdef CONFIG_SMP
+	/* Update global table */
+	unsigned int type = (int *)table->data - dqstats.stat;
+	dqstats.stat[type] = dqstats_read(type);
+#endif
+	return proc_dointvec(table, write, buffer, lenp, ppos);
+}
+
 static ctl_table fs_dqstats_table[] = {
 	{
 		.procname	= "lookups",
-		.data		= &dqstats.lookups,
+		.data		= &dqstats.stat[DQST_LOOKUPS],
 		.maxlen		= sizeof(int),
 		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "drops",
-		.data		= &dqstats.drops,
+		.data		= &dqstats.stat[DQST_DROPS],
 		.maxlen		= sizeof(int),
 		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "reads",
-		.data		= &dqstats.reads,
+		.data		= &dqstats.stat[DQST_READS],
 		.maxlen		= sizeof(int),
 		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "writes",
-		.data		= &dqstats.writes,
+		.data		= &dqstats.stat[DQST_WRITES],
 		.maxlen		= sizeof(int),
 		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "cache_hits",
-		.data		= &dqstats.cache_hits,
+		.data		= &dqstats.stat[DQST_CACHE_HITS],
 		.maxlen		= sizeof(int),
 		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "allocated_dquots",
-		.data		= &dqstats.allocated_dquots,
+		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
 		.maxlen		= sizeof(int),
 		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "free_dquots",
-		.data		= &dqstats.free_dquots,
+		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
 		.maxlen		= sizeof(int),
 		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "syncs",
-		.data		= &dqstats.syncs,
+		.data		= &dqstats.stat[DQST_SYNCS],
 		.maxlen		= sizeof(int),
 		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= do_proc_dqstats,
 	},
 #ifdef CONFIG_PRINT_QUOTA_WARNING
 	{
@@ -2572,6 +2624,13 @@
 	if (!dquot_hash)
 		panic("Cannot create dquot hash table");
 
+#ifdef CONFIG_SMP
+	dqstats_pcpu = alloc_percpu(struct dqstats);
+	if (!dqstats_pcpu)
+		panic("Cannot create dquot stats table");
+#endif
+	memset(&dqstats, 0, sizeof(struct dqstats));
+
 	/* Find power-of-two hlist_heads which can fit into allocation */
 	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
 	dq_hash_bits = 0;
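The statistics rework above turns the individual dqstats counters into an array indexed by the DQST_* constants and, on SMP, keeps them per CPU in dqstats_pcpu, folding the per-CPU values together only when they are read (dqstats_read() above). The dqstats_inc()/dqstats_dec() helpers used throughout are defined in include/linux/quota.h, which is not part of this hunk; a sketch consistent with dqstats_read() would look roughly like:

	/* Hypothetical sketch; the real helpers are declared in include/linux/quota.h. */
	static inline void dqstats_inc(unsigned int type)
	{
	#ifdef CONFIG_SMP
		per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]++;
	#else
		dqstats.stat[type]++;
	#endif
	}

	static inline void dqstats_dec(unsigned int type)
	{
	#ifdef CONFIG_SMP
		per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]--;
	#else
		dqstats.stat[type]--;
	#endif
	}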
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 95388f9..cfc7882 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -113,8 +113,6 @@
 	struct if_dqinfo info;
 	int ret;
 
-	if (!sb_has_quota_active(sb, type))
-		return -ESRCH;
 	if (!sb->s_qcop->get_info)
 		return -ENOSYS;
 	ret = sb->s_qcop->get_info(sb, type, &info);
@@ -129,43 +127,80 @@
 
 	if (copy_from_user(&info, addr, sizeof(info)))
 		return -EFAULT;
-	if (!sb_has_quota_active(sb, type))
-		return -ESRCH;
 	if (!sb->s_qcop->set_info)
 		return -ENOSYS;
 	return sb->s_qcop->set_info(sb, type, &info);
 }
 
+static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
+{
+	dst->dqb_bhardlimit = src->d_blk_hardlimit;
+	dst->dqb_bsoftlimit = src->d_blk_softlimit;
+	dst->dqb_curspace = src->d_bcount;
+	dst->dqb_ihardlimit = src->d_ino_hardlimit;
+	dst->dqb_isoftlimit = src->d_ino_softlimit;
+	dst->dqb_curinodes = src->d_icount;
+	dst->dqb_btime = src->d_btimer;
+	dst->dqb_itime = src->d_itimer;
+	dst->dqb_valid = QIF_ALL;
+}
+
 static int quota_getquota(struct super_block *sb, int type, qid_t id,
 			  void __user *addr)
 {
+	struct fs_disk_quota fdq;
 	struct if_dqblk idq;
 	int ret;
 
-	if (!sb_has_quota_active(sb, type))
-		return -ESRCH;
 	if (!sb->s_qcop->get_dqblk)
 		return -ENOSYS;
-	ret = sb->s_qcop->get_dqblk(sb, type, id, &idq);
+	ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
 	if (ret)
 		return ret;
+	copy_to_if_dqblk(&idq, &fdq);
 	if (copy_to_user(addr, &idq, sizeof(idq)))
 		return -EFAULT;
 	return 0;
 }
 
+static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
+{
+	dst->d_blk_hardlimit = src->dqb_bhardlimit;
+	dst->d_blk_softlimit  = src->dqb_bsoftlimit;
+	dst->d_bcount = src->dqb_curspace;
+	dst->d_ino_hardlimit = src->dqb_ihardlimit;
+	dst->d_ino_softlimit = src->dqb_isoftlimit;
+	dst->d_icount = src->dqb_curinodes;
+	dst->d_btimer = src->dqb_btime;
+	dst->d_itimer = src->dqb_itime;
+
+	dst->d_fieldmask = 0;
+	if (src->dqb_valid & QIF_BLIMITS)
+		dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
+	if (src->dqb_valid & QIF_SPACE)
+		dst->d_fieldmask |= FS_DQ_BCOUNT;
+	if (src->dqb_valid & QIF_ILIMITS)
+		dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
+	if (src->dqb_valid & QIF_INODES)
+		dst->d_fieldmask |= FS_DQ_ICOUNT;
+	if (src->dqb_valid & QIF_BTIME)
+		dst->d_fieldmask |= FS_DQ_BTIMER;
+	if (src->dqb_valid & QIF_ITIME)
+		dst->d_fieldmask |= FS_DQ_ITIMER;
+}
+
 static int quota_setquota(struct super_block *sb, int type, qid_t id,
 			  void __user *addr)
 {
+	struct fs_disk_quota fdq;
 	struct if_dqblk idq;
 
 	if (copy_from_user(&idq, addr, sizeof(idq)))
 		return -EFAULT;
-	if (!sb_has_quota_active(sb, type))
-		return -ESRCH;
 	if (!sb->s_qcop->set_dqblk)
 		return -ENOSYS;
-	return sb->s_qcop->set_dqblk(sb, type, id, &idq);
+	copy_from_if_dqblk(&fdq, &idq);
+	return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
 }
 
 static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr)
@@ -199,9 +234,9 @@
 
 	if (copy_from_user(&fdq, addr, sizeof(fdq)))
 		return -EFAULT;
-	if (!sb->s_qcop->set_xquota)
+	if (!sb->s_qcop->set_dqblk)
 		return -ENOSYS;
-	return sb->s_qcop->set_xquota(sb, type, id, &fdq);
+	return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
 }
 
 static int quota_getxquota(struct super_block *sb, int type, qid_t id,
@@ -210,9 +245,9 @@
 	struct fs_disk_quota fdq;
 	int ret;
 
-	if (!sb->s_qcop->get_xquota)
+	if (!sb->s_qcop->get_dqblk)
 		return -ENOSYS;
-	ret = sb->s_qcop->get_xquota(sb, type, id, &fdq);
+	ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
 	if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
 		return -EFAULT;
 	return ret;
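With the changes above, quota_getquota() and quota_setquota() translate between the userspace struct if_dqblk and the kernel-internal struct fs_disk_quota, so a single ->get_dqblk/->set_dqblk pair now backs both the native Q_GETQUOTA/Q_SETQUOTA path and the XFS-style Q_XGETQUOTA/Q_XSETQUOTA path. The userspace ABI is unchanged; a minimal sketch of the caller side, assuming glibc's <sys/quota.h> exposes the v2 struct dqblk (which mirrors the kernel's struct if_dqblk) and using an example device path and uid:

	#include <sys/types.h>
	#include <sys/quota.h>
	#include <stdio.h>

	int main(void)
	{
		struct dqblk dq;

		if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/sda1", 1000,
			     (caddr_t)&dq) < 0) {
			perror("quotactl");
			return 1;
		}
		printf("uid 1000: %llu bytes in use\n",
		       (unsigned long long)dq.dqb_curspace);
		return 0;
	}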
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index f81f4bc..24f0340 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -60,9 +60,17 @@
 static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
 {
 	struct super_block *sb = info->dqi_sb;
+	ssize_t ret;
 
-	return sb->s_op->quota_write(sb, info->dqi_type, buf,
+	ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
 	       info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
+	if (ret != info->dqi_usable_bs) {
+		q_warn(KERN_WARNING "VFS: dquota write failed on "
+			"dev %s\n", sb->s_id);
+		if (ret >= 0)
+			ret = -EIO;
+	}
+	return ret;
 }
 
 /* Remove empty block from list and return it */
@@ -152,7 +160,7 @@
 	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
 	/* No matter whether write succeeds block is out of list */
 	if (write_blk(info, blk, buf) < 0)
-		printk(KERN_ERR
+		q_warn(KERN_ERR
 		       "VFS: Can't write block (%u) with free entries.\n",
 		       blk);
 	return 0;
@@ -244,7 +252,7 @@
 	if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
 		*err = remove_free_dqentry(info, buf, blk);
 		if (*err < 0) {
-			printk(KERN_ERR "VFS: find_free_dqentry(): Can't "
+			q_warn(KERN_ERR "VFS: find_free_dqentry(): Can't "
 			       "remove block (%u) from entry free list.\n",
 			       blk);
 			goto out_buf;
@@ -268,7 +276,7 @@
 #endif
 	*err = write_blk(info, blk, buf);
 	if (*err < 0) {
-		printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota "
+		q_warn(KERN_ERR "VFS: find_free_dqentry(): Can't write quota "
 				"data block %u.\n", blk);
 		goto out_buf;
 	}
@@ -303,7 +311,7 @@
 	} else {
 		ret = read_blk(info, *treeblk, buf);
 		if (ret < 0) {
-			printk(KERN_ERR "VFS: Can't read tree quota block "
+			q_warn(KERN_ERR "VFS: Can't read tree quota block "
 					"%u.\n", *treeblk);
 			goto out_buf;
 		}
@@ -365,7 +373,7 @@
 	if (!dquot->dq_off) {
 		ret = dq_insert_tree(info, dquot);
 		if (ret < 0) {
-			printk(KERN_ERR "VFS: Error %zd occurred while "
+			q_warn(KERN_ERR "VFS: Error %zd occurred while "
 					"creating quota.\n", ret);
 			kfree(ddquot);
 			return ret;
@@ -377,14 +385,14 @@
 	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
 				    dquot->dq_off);
 	if (ret != info->dqi_entry_size) {
-		printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
+		q_warn(KERN_WARNING "VFS: dquota write failed on dev %s\n",
 		       sb->s_id);
 		if (ret >= 0)
 			ret = -ENOSPC;
 	} else {
 		ret = 0;
 	}
-	dqstats.writes++;
+	dqstats_inc(DQST_WRITES);
 	kfree(ddquot);
 
 	return ret;
@@ -402,14 +410,14 @@
 	if (!buf)
 		return -ENOMEM;
 	if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
-		printk(KERN_ERR "VFS: Quota structure has offset to other "
+		q_warn(KERN_ERR "VFS: Quota structure has offset to other "
 		  "block (%u) than it should (%u).\n", blk,
 		  (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
 		goto out_buf;
 	}
 	ret = read_blk(info, blk, buf);
 	if (ret < 0) {
-		printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk);
+		q_warn(KERN_ERR "VFS: Can't read quota data block %u\n", blk);
 		goto out_buf;
 	}
 	dh = (struct qt_disk_dqdbheader *)buf;
@@ -419,7 +427,7 @@
 		if (ret >= 0)
 			ret = put_free_dqblk(info, buf, blk);
 		if (ret < 0) {
-			printk(KERN_ERR "VFS: Can't move quota data block (%u) "
+			q_warn(KERN_ERR "VFS: Can't move quota data block (%u) "
 			  "to free list.\n", blk);
 			goto out_buf;
 		}
@@ -432,14 +440,14 @@
 			/* Insert will write block itself */
 			ret = insert_free_dqentry(info, buf, blk);
 			if (ret < 0) {
-				printk(KERN_ERR "VFS: Can't insert quota data "
+				q_warn(KERN_ERR "VFS: Can't insert quota data "
 				       "block (%u) to free entry list.\n", blk);
 				goto out_buf;
 			}
 		} else {
 			ret = write_blk(info, blk, buf);
 			if (ret < 0) {
-				printk(KERN_ERR "VFS: Can't write quota data "
+				q_warn(KERN_ERR "VFS: Can't write quota data "
 				  "block %u\n", blk);
 				goto out_buf;
 			}
@@ -464,7 +472,7 @@
 		return -ENOMEM;
 	ret = read_blk(info, *blk, buf);
 	if (ret < 0) {
-		printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk);
+		q_warn(KERN_ERR "VFS: Can't read quota data block %u\n", *blk);
 		goto out_buf;
 	}
 	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
@@ -488,7 +496,7 @@
 		} else {
 			ret = write_blk(info, *blk, buf);
 			if (ret < 0)
-				printk(KERN_ERR "VFS: Can't write quota tree "
+				q_warn(KERN_ERR "VFS: Can't write quota tree "
 				  "block %u.\n", *blk);
 		}
 	}
@@ -521,7 +529,7 @@
 		return -ENOMEM;
 	ret = read_blk(info, blk, buf);
 	if (ret < 0) {
-		printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
+		q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
 		goto out_buf;
 	}
 	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
@@ -531,7 +539,7 @@
 		ddquot += info->dqi_entry_size;
 	}
 	if (i == qtree_dqstr_in_blk(info)) {
-		printk(KERN_ERR "VFS: Quota for id %u referenced "
+		q_warn(KERN_ERR "VFS: Quota for id %u referenced "
 		  "but not present.\n", dquot->dq_id);
 		ret = -EIO;
 		goto out_buf;
@@ -556,7 +564,7 @@
 		return -ENOMEM;
 	ret = read_blk(info, blk, buf);
 	if (ret < 0) {
-		printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
+		q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
 		goto out_buf;
 	}
 	ret = 0;
@@ -599,7 +607,7 @@
 		offset = find_dqentry(info, dquot);
 		if (offset <= 0) {	/* Entry not present? */
 			if (offset < 0)
-				printk(KERN_ERR "VFS: Can't read quota "
+				q_warn(KERN_ERR "VFS: Can't read quota "
 				  "structure for id %u.\n", dquot->dq_id);
 			dquot->dq_off = 0;
 			set_bit(DQ_FAKE_B, &dquot->dq_flags);
@@ -617,7 +625,7 @@
 	if (ret != info->dqi_entry_size) {
 		if (ret >= 0)
 			ret = -EIO;
-		printk(KERN_ERR "VFS: Error while reading quota "
+		q_warn(KERN_ERR "VFS: Error while reading quota "
 				"structure for id %u.\n", dquot->dq_id);
 		set_bit(DQ_FAKE_B, &dquot->dq_flags);
 		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
@@ -634,7 +642,7 @@
 	spin_unlock(&dq_data_lock);
 	kfree(ddquot);
 out:
-	dqstats.reads++;
+	dqstats_inc(DQST_READS);
 	return ret;
 }
 EXPORT_SYMBOL(qtree_read_dquot);
diff --git a/fs/quota/quota_tree.h b/fs/quota/quota_tree.h
index a1ab8db..ccc3e71 100644
--- a/fs/quota/quota_tree.h
+++ b/fs/quota/quota_tree.h
@@ -22,4 +22,10 @@
 
 #define QT_TREEOFF	1		/* Offset of tree in file in blocks */
 
+#define q_warn(fmt, args...) \
+do { \
+	if (printk_ratelimit()) \
+		printk(fmt, ## args); \
+} while(0)
+
 #endif /* _LINUX_QUOTAIO_TREE_H */
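The q_warn() wrapper defined above rate-limits the quota warnings that the following hunks convert from plain printk(), so a corrupted quota file that fails the same way on every access can no longer flood the log: with the default printk_ratelimit() settings (a burst of 10 messages every 5 seconds) only the burst gets through. Illustration only, as a hypothetical kernel-context helper:

	/* Hypothetical example; nr_bad_blocks is made up. Only the ratelimit
	 * burst of these messages would actually reach the log. */
	static void report_bad_blocks(unsigned int nr_bad_blocks)
	{
		unsigned int blk;

		for (blk = 0; blk < nr_bad_blocks; blk++)
			q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
	}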
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index 2ae757e..4af344c 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -71,7 +71,7 @@
 	    dquot->dq_dqb.dqb_ihardlimit == 0 &&
 	    dquot->dq_dqb.dqb_isoftlimit == 0)
 		set_bit(DQ_FAKE_B, &dquot->dq_flags);
-	dqstats.reads++;
+	dqstats_inc(DQST_READS);
 
 	return 0;
 }
@@ -104,7 +104,7 @@
 	ret = 0;
 
 out:
-	dqstats.writes++;
+	dqstats_inc(DQST_WRITES);
 
 	return ret;
 }
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index e3da02f..135206a 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -63,7 +63,7 @@
 	size = sb->s_op->quota_read(sb, type, (char *)dqhead,
 				    sizeof(struct v2_disk_dqheader), 0);
 	if (size != sizeof(struct v2_disk_dqheader)) {
-		printk(KERN_WARNING "quota_v2: Failed header read:"
+		q_warn(KERN_WARNING "quota_v2: Failed header read:"
 		       " expected=%zd got=%zd\n",
 			sizeof(struct v2_disk_dqheader), size);
 		return 0;
@@ -106,7 +106,7 @@
 	size = sb->s_op->quota_read(sb, type, (char *)&dinfo,
 	       sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
 	if (size != sizeof(struct v2_disk_dqinfo)) {
-		printk(KERN_WARNING "quota_v2: Can't read info structure on device %s.\n",
+		q_warn(KERN_WARNING "quota_v2: Can't read info structure on device %s.\n",
 			sb->s_id);
 		return -1;
 	}
@@ -167,7 +167,7 @@
 	size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
 	       sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
 	if (size != sizeof(struct v2_disk_dqinfo)) {
-		printk(KERN_WARNING "Can't write info structure on device %s.\n",
+		q_warn(KERN_WARNING "Can't write info structure on device %s.\n",
 			sb->s_id);
 		return -1;
 	}
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index c948534..f47cd21 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -214,7 +214,7 @@
 	return 0;
 }
 
-static int ramfs_fill_super(struct super_block * sb, void * data, int silent)
+int ramfs_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct ramfs_fs_info *fsi;
 	struct inode *inode = NULL;
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 1d9c127..9977df9 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -147,7 +147,8 @@
 	barrier_done = reiserfs_commit_for_inode(inode);
 	reiserfs_write_unlock(inode->i_sb);
 	if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
-		blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
+		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL, 
+			BLKDEV_IFL_WAIT);
 	if (barrier_done < 0)
 		return barrier_done;
 	return (err < 0) ? -EIO : 0;
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index dc2c65e..0f22fda 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3076,9 +3076,10 @@
 	ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
 
 	depth = reiserfs_write_lock_once(inode->i_sb);
-	if (attr->ia_valid & ATTR_SIZE) {
+	if (is_quota_modification(inode, attr))
 		dquot_initialize(inode);
 
+	if (attr->ia_valid & ATTR_SIZE) {
 		/* version 2 items will be caught by the s_maxbytes check
 		 ** done for us in vmtruncate
 		 */
diff --git a/fs/splice.c b/fs/splice.c
index 9313b61..ac22b00 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -193,8 +193,8 @@
 			break;
 		}
 
-		if (pipe->nrbufs < PIPE_BUFFERS) {
-			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
+		if (pipe->nrbufs < pipe->buffers) {
+			int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
 			struct pipe_buffer *buf = pipe->bufs + newbuf;
 
 			buf->page = spd->pages[page_nr];
@@ -214,7 +214,7 @@
 
 			if (!--spd->nr_pages)
 				break;
-			if (pipe->nrbufs < PIPE_BUFFERS)
+			if (pipe->nrbufs < pipe->buffers)
 				continue;
 
 			break;
@@ -265,6 +265,36 @@
 	page_cache_release(spd->pages[i]);
 }
 
+/*
+ * Check if we need to grow the arrays holding pages and partial page
+ * descriptions.
+ */
+int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
+{
+	if (pipe->buffers <= PIPE_DEF_BUFFERS)
+		return 0;
+
+	spd->pages = kmalloc(pipe->buffers * sizeof(struct page *), GFP_KERNEL);
+	spd->partial = kmalloc(pipe->buffers * sizeof(struct partial_page), GFP_KERNEL);
+
+	if (spd->pages && spd->partial)
+		return 0;
+
+	kfree(spd->pages);
+	kfree(spd->partial);
+	return -ENOMEM;
+}
+
+void splice_shrink_spd(struct pipe_inode_info *pipe,
+		       struct splice_pipe_desc *spd)
+{
+	if (pipe->buffers <= PIPE_DEF_BUFFERS)
+		return;
+
+	kfree(spd->pages);
+	kfree(spd->partial);
+}
+
 static int
 __generic_file_splice_read(struct file *in, loff_t *ppos,
 			   struct pipe_inode_info *pipe, size_t len,
@@ -272,8 +302,8 @@
 {
 	struct address_space *mapping = in->f_mapping;
 	unsigned int loff, nr_pages, req_pages;
-	struct page *pages[PIPE_BUFFERS];
-	struct partial_page partial[PIPE_BUFFERS];
+	struct page *pages[PIPE_DEF_BUFFERS];
+	struct partial_page partial[PIPE_DEF_BUFFERS];
 	struct page *page;
 	pgoff_t index, end_index;
 	loff_t isize;
@@ -286,15 +316,18 @@
 		.spd_release = spd_release_page,
 	};
 
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
+
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	loff = *ppos & ~PAGE_CACHE_MASK;
 	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	nr_pages = min(req_pages, (unsigned)PIPE_BUFFERS);
+	nr_pages = min(req_pages, pipe->buffers);
 
 	/*
 	 * Lookup the (hopefully) full range of pages we need.
 	 */
-	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);
+	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages);
 	index += spd.nr_pages;
 
 	/*
@@ -335,7 +368,7 @@
 			unlock_page(page);
 		}
 
-		pages[spd.nr_pages++] = page;
+		spd.pages[spd.nr_pages++] = page;
 		index++;
 	}
 
@@ -356,7 +389,7 @@
 		 * this_len is the max we'll use from this page
 		 */
 		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
-		page = pages[page_nr];
+		page = spd.pages[page_nr];
 
 		if (PageReadahead(page))
 			page_cache_async_readahead(mapping, &in->f_ra, in,
@@ -393,8 +426,8 @@
 					error = -ENOMEM;
 					break;
 				}
-				page_cache_release(pages[page_nr]);
-				pages[page_nr] = page;
+				page_cache_release(spd.pages[page_nr]);
+				spd.pages[page_nr] = page;
 			}
 			/*
 			 * page was already under io and is now done, great
@@ -451,8 +484,8 @@
 			len = this_len;
 		}
 
-		partial[page_nr].offset = loff;
-		partial[page_nr].len = this_len;
+		spd.partial[page_nr].offset = loff;
+		spd.partial[page_nr].len = this_len;
 		len -= this_len;
 		loff = 0;
 		spd.nr_pages++;
@@ -464,12 +497,13 @@
 	 * we got, 'nr_pages' is how many pages are in the map.
 	 */
 	while (page_nr < nr_pages)
-		page_cache_release(pages[page_nr++]);
+		page_cache_release(spd.pages[page_nr++]);
 	in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 
 	if (spd.nr_pages)
-		return splice_to_pipe(pipe, &spd);
+		error = splice_to_pipe(pipe, &spd);
 
+	splice_shrink_spd(pipe, &spd);
 	return error;
 }
 
@@ -560,9 +594,9 @@
 	unsigned int nr_pages;
 	unsigned int nr_freed;
 	size_t offset;
-	struct page *pages[PIPE_BUFFERS];
-	struct partial_page partial[PIPE_BUFFERS];
-	struct iovec vec[PIPE_BUFFERS];
+	struct page *pages[PIPE_DEF_BUFFERS];
+	struct partial_page partial[PIPE_DEF_BUFFERS];
+	struct iovec *vec, __vec[PIPE_DEF_BUFFERS];
 	pgoff_t index;
 	ssize_t res;
 	size_t this_len;
@@ -576,11 +610,22 @@
 		.spd_release = spd_release_page,
 	};
 
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
+
+	res = -ENOMEM;
+	vec = __vec;
+	if (pipe->buffers > PIPE_DEF_BUFFERS) {
+		vec = kmalloc(pipe->buffers * sizeof(struct iovec), GFP_KERNEL);
+		if (!vec)
+			goto shrink_ret;
+	}
+
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
 	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
-	for (i = 0; i < nr_pages && i < PIPE_BUFFERS && len; i++) {
+	for (i = 0; i < nr_pages && i < pipe->buffers && len; i++) {
 		struct page *page;
 
 		page = alloc_page(GFP_USER);
@@ -591,7 +636,7 @@
 		this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
 		vec[i].iov_base = (void __user *) page_address(page);
 		vec[i].iov_len = this_len;
-		pages[i] = page;
+		spd.pages[i] = page;
 		spd.nr_pages++;
 		len -= this_len;
 		offset = 0;
@@ -610,11 +655,11 @@
 	nr_freed = 0;
 	for (i = 0; i < spd.nr_pages; i++) {
 		this_len = min_t(size_t, vec[i].iov_len, res);
-		partial[i].offset = 0;
-		partial[i].len = this_len;
+		spd.partial[i].offset = 0;
+		spd.partial[i].len = this_len;
 		if (!this_len) {
-			__free_page(pages[i]);
-			pages[i] = NULL;
+			__free_page(spd.pages[i]);
+			spd.pages[i] = NULL;
 			nr_freed++;
 		}
 		res -= this_len;
@@ -625,13 +670,18 @@
 	if (res > 0)
 		*ppos += res;
 
+shrink_ret:
+	if (vec != __vec)
+		kfree(vec);
+	splice_shrink_spd(pipe, &spd);
 	return res;
 
 err:
 	for (i = 0; i < spd.nr_pages; i++)
-		__free_page(pages[i]);
+		__free_page(spd.pages[i]);
 
-	return error;
+	res = error;
+	goto shrink_ret;
 }
 EXPORT_SYMBOL(default_file_splice_read);
 
@@ -784,7 +834,7 @@
 		if (!buf->len) {
 			buf->ops = NULL;
 			ops->release(pipe, buf);
-			pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
+			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
 			pipe->nrbufs--;
 			if (pipe->inode)
 				sd->need_wakeup = true;
@@ -1211,7 +1261,7 @@
 	 * If we did an incomplete transfer we must release
 	 * the pipe buffers in question:
 	 */
-	for (i = 0; i < PIPE_BUFFERS; i++) {
+	for (i = 0; i < pipe->buffers; i++) {
 		struct pipe_buffer *buf = pipe->bufs + i;
 
 		if (buf->ops) {
@@ -1371,7 +1421,8 @@
  */
 static int get_iovec_page_array(const struct iovec __user *iov,
 				unsigned int nr_vecs, struct page **pages,
-				struct partial_page *partial, int aligned)
+				struct partial_page *partial, int aligned,
+				unsigned int pipe_buffers)
 {
 	int buffers = 0, error = 0;
 
@@ -1414,8 +1465,8 @@
 			break;
 
 		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-		if (npages > PIPE_BUFFERS - buffers)
-			npages = PIPE_BUFFERS - buffers;
+		if (npages > pipe_buffers - buffers)
+			npages = pipe_buffers - buffers;
 
 		error = get_user_pages_fast((unsigned long)base, npages,
 					0, &pages[buffers]);
@@ -1450,7 +1501,7 @@
 		 * or if we mapped the max number of pages that we have
 		 * room for.
 		 */
-		if (error < npages || buffers == PIPE_BUFFERS)
+		if (error < npages || buffers == pipe_buffers)
 			break;
 
 		nr_vecs--;
@@ -1593,8 +1644,8 @@
 			     unsigned long nr_segs, unsigned int flags)
 {
 	struct pipe_inode_info *pipe;
-	struct page *pages[PIPE_BUFFERS];
-	struct partial_page partial[PIPE_BUFFERS];
+	struct page *pages[PIPE_DEF_BUFFERS];
+	struct partial_page partial[PIPE_DEF_BUFFERS];
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.partial = partial,
@@ -1602,17 +1653,25 @@
 		.ops = &user_page_pipe_buf_ops,
 		.spd_release = spd_release_page,
 	};
+	long ret;
 
 	pipe = pipe_info(file->f_path.dentry->d_inode);
 	if (!pipe)
 		return -EBADF;
 
-	spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial,
-					    flags & SPLICE_F_GIFT);
-	if (spd.nr_pages <= 0)
-		return spd.nr_pages;
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
 
-	return splice_to_pipe(pipe, &spd);
+	spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
+					    spd.partial, flags & SPLICE_F_GIFT,
+					    pipe->buffers);
+	if (spd.nr_pages <= 0)
+		ret = spd.nr_pages;
+	else
+		ret = splice_to_pipe(pipe, &spd);
+
+	splice_shrink_spd(pipe, &spd);
+	return ret;
 }
 
 /*
@@ -1738,13 +1797,13 @@
 	 * Check ->nrbufs without the inode lock first. This function
 	 * is speculative anyways, so missing one is ok.
 	 */
-	if (pipe->nrbufs < PIPE_BUFFERS)
+	if (pipe->nrbufs < pipe->buffers)
 		return 0;
 
 	ret = 0;
 	pipe_lock(pipe);
 
-	while (pipe->nrbufs >= PIPE_BUFFERS) {
+	while (pipe->nrbufs >= pipe->buffers) {
 		if (!pipe->readers) {
 			send_sig(SIGPIPE, current, 0);
 			ret = -EPIPE;
@@ -1810,7 +1869,7 @@
 		 * Cannot make any progress, because either the input
 		 * pipe is empty or the output pipe is full.
 		 */
-		if (!ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS) {
+		if (!ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) {
 			/* Already processed some buffers, break */
 			if (ret)
 				break;
@@ -1831,7 +1890,7 @@
 		}
 
 		ibuf = ipipe->bufs + ipipe->curbuf;
-		nbuf = (opipe->curbuf + opipe->nrbufs) % PIPE_BUFFERS;
+		nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
 		obuf = opipe->bufs + nbuf;
 
 		if (len >= ibuf->len) {
@@ -1841,7 +1900,7 @@
 			*obuf = *ibuf;
 			ibuf->ops = NULL;
 			opipe->nrbufs++;
-			ipipe->curbuf = (ipipe->curbuf + 1) % PIPE_BUFFERS;
+			ipipe->curbuf = (ipipe->curbuf + 1) & (ipipe->buffers - 1);
 			ipipe->nrbufs--;
 			input_wakeup = true;
 		} else {
@@ -1914,11 +1973,11 @@
 		 * If we have iterated all input buffers or ran out of
 		 * output room, break.
 		 */
-		if (i >= ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS)
+		if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers)
 			break;
 
-		ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));
-		nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);
+		ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1));
+		nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
 
 		/*
 		 * Get a reference to this pipe buffer,
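
The fs/splice.c hunks above replace the fixed PIPE_BUFFERS-sized arrays with a
splice_pipe_desc that can be grown to pipe->buffers entries. A minimal sketch of
the calling convention the converted functions now share (the surrounding
function name and the "fill" step are illustrative, not part of the patch):

	static ssize_t example_splice_read(struct pipe_inode_info *pipe)
	{
		struct page *pages[PIPE_DEF_BUFFERS];
		struct partial_page partial[PIPE_DEF_BUFFERS];
		struct splice_pipe_desc spd = {
			.pages = pages,
			.partial = partial,
			.flags = 0,
			.ops = &page_cache_pipe_buf_ops,
			.spd_release = spd_release_page,
		};
		ssize_t ret;

		/* repoints spd.pages/spd.partial at kmalloc'd arrays when the
		 * pipe has been resized beyond PIPE_DEF_BUFFERS */
		if (splice_grow_spd(pipe, &spd))
			return -ENOMEM;

		/* ... fill spd.pages[]/spd.partial[] up to pipe->buffers entries ... */

		ret = splice_to_pipe(pipe, &spd);
		splice_shrink_spd(pipe, &spd);	/* no-op when the stack arrays were used */
		return ret;
	}
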
diff --git a/fs/sync.c b/fs/sync.c
index 92b2281..de6a441 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -42,7 +42,7 @@
 	if (wait)
 		sync_inodes_sb(sb);
 	else
-		writeback_inodes_sb(sb);
+		writeback_inodes_sb_locked(sb);
 
 	if (sb->s_op->sync_fs)
 		sb->s_op->sync_fs(sb, wait);
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index e9d2935..4e321f7 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -46,9 +46,9 @@
 };
 
 static int
-fill_read(struct dentry *dentry, char *buffer, loff_t off, size_t count)
+fill_read(struct file *file, char *buffer, loff_t off, size_t count)
 {
-	struct sysfs_dirent *attr_sd = dentry->d_fsdata;
+	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
 	struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
 	struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
 	int rc;
@@ -59,7 +59,7 @@
 
 	rc = -EIO;
 	if (attr->read)
-		rc = attr->read(kobj, attr, buffer, off, count);
+		rc = attr->read(file, kobj, attr, buffer, off, count);
 
 	sysfs_put_active(attr_sd);
 
@@ -70,8 +70,7 @@
 read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
 {
 	struct bin_buffer *bb = file->private_data;
-	struct dentry *dentry = file->f_path.dentry;
-	int size = dentry->d_inode->i_size;
+	int size = file->f_path.dentry->d_inode->i_size;
 	loff_t offs = *off;
 	int count = min_t(size_t, bytes, PAGE_SIZE);
 	char *temp;
@@ -92,7 +91,7 @@
 
 	mutex_lock(&bb->mutex);
 
-	count = fill_read(dentry, bb->buffer, offs, count);
+	count = fill_read(file, bb->buffer, offs, count);
 	if (count < 0) {
 		mutex_unlock(&bb->mutex);
 		goto out_free;
@@ -117,9 +116,9 @@
 }
 
 static int
-flush_write(struct dentry *dentry, char *buffer, loff_t offset, size_t count)
+flush_write(struct file *file, char *buffer, loff_t offset, size_t count)
 {
-	struct sysfs_dirent *attr_sd = dentry->d_fsdata;
+	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
 	struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
 	struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
 	int rc;
@@ -130,7 +129,7 @@
 
 	rc = -EIO;
 	if (attr->write)
-		rc = attr->write(kobj, attr, buffer, offset, count);
+		rc = attr->write(file, kobj, attr, buffer, offset, count);
 
 	sysfs_put_active(attr_sd);
 
@@ -141,8 +140,7 @@
 		     size_t bytes, loff_t *off)
 {
 	struct bin_buffer *bb = file->private_data;
-	struct dentry *dentry = file->f_path.dentry;
-	int size = dentry->d_inode->i_size;
+	int size = file->f_path.dentry->d_inode->i_size;
 	loff_t offs = *off;
 	int count = min_t(size_t, bytes, PAGE_SIZE);
 	char *temp;
@@ -165,7 +163,7 @@
 
 	memcpy(bb->buffer, temp, count);
 
-	count = flush_write(dentry, bb->buffer, offs, count);
+	count = flush_write(file, bb->buffer, offs, count);
 	mutex_unlock(&bb->mutex);
 
 	if (count > 0)
@@ -363,7 +361,7 @@
 	if (!attr->mmap)
 		goto out_put;
 
-	rc = attr->mmap(kobj, attr, vma);
+	rc = attr->mmap(file, kobj, attr, vma);
 	if (rc)
 		goto out_put;
 
@@ -501,7 +499,7 @@
 void sysfs_remove_bin_file(struct kobject *kobj,
 			   const struct bin_attribute *attr)
 {
-	sysfs_hash_and_remove(kobj->sd, attr->attr.name);
+	sysfs_hash_and_remove(kobj->sd, NULL, attr->attr.name);
 }
 
 EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
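
The fs/sysfs/bin.c changes above thread the opening struct file down to the
bin_attribute callbacks. A hedged sketch of a driver-side callback under the new
prototypes (the attribute name and the zero-filled payload are made up for
illustration):

	/* the read callback now receives the struct file as its first argument */
	static ssize_t example_bin_read(struct file *filp, struct kobject *kobj,
					struct bin_attribute *attr,
					char *buf, loff_t off, size_t count)
	{
		/* kobj/attr/buf/off/count keep their old meaning */
		memset(buf, 0, count);
		return count;
	}

	static struct bin_attribute example_bin_attr = {
		.attr	= { .name = "example", .mode = S_IRUGO },
		.size	= PAGE_SIZE,
		.read	= example_bin_read,
	};
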
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 5907178..7e54bac 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -380,7 +380,7 @@
 {
 	struct sysfs_inode_attrs *ps_iattr;
 
-	if (sysfs_find_dirent(acxt->parent_sd, sd->s_name))
+	if (sysfs_find_dirent(acxt->parent_sd, sd->s_ns, sd->s_name))
 		return -EEXIST;
 
 	sd->s_parent = sysfs_get(acxt->parent_sd);
@@ -533,13 +533,17 @@
  *	Pointer to sysfs_dirent if found, NULL if not.
  */
 struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
+				       const void *ns,
 				       const unsigned char *name)
 {
 	struct sysfs_dirent *sd;
 
-	for (sd = parent_sd->s_dir.children; sd; sd = sd->s_sibling)
+	for (sd = parent_sd->s_dir.children; sd; sd = sd->s_sibling) {
+		if (ns && sd->s_ns && (sd->s_ns != ns))
+			continue;
 		if (!strcmp(sd->s_name, name))
 			return sd;
+	}
 	return NULL;
 }
 
@@ -558,12 +562,13 @@
  *	Pointer to sysfs_dirent if found, NULL if not.
  */
 struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
+				      const void *ns,
 				      const unsigned char *name)
 {
 	struct sysfs_dirent *sd;
 
 	mutex_lock(&sysfs_mutex);
-	sd = sysfs_find_dirent(parent_sd, name);
+	sd = sysfs_find_dirent(parent_sd, ns, name);
 	sysfs_get(sd);
 	mutex_unlock(&sysfs_mutex);
 
@@ -572,7 +577,8 @@
 EXPORT_SYMBOL_GPL(sysfs_get_dirent);
 
 static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
-		      const char *name, struct sysfs_dirent **p_sd)
+	enum kobj_ns_type type, const void *ns, const char *name,
+	struct sysfs_dirent **p_sd)
 {
 	umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
 	struct sysfs_addrm_cxt acxt;
@@ -583,6 +589,9 @@
 	sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
 	if (!sd)
 		return -ENOMEM;
+
+	sd->s_flags |= (type << SYSFS_NS_TYPE_SHIFT);
+	sd->s_ns = ns;
 	sd->s_dir.kobj = kobj;
 
 	/* link in */
@@ -601,7 +610,33 @@
 int sysfs_create_subdir(struct kobject *kobj, const char *name,
 			struct sysfs_dirent **p_sd)
 {
-	return create_dir(kobj, kobj->sd, name, p_sd);
+	return create_dir(kobj, kobj->sd,
+			  KOBJ_NS_TYPE_NONE, NULL, name, p_sd);
+}
+
+/**
+ *	sysfs_read_ns_type: return associated ns_type
+ *	@kobj: the kobject being queried
+ *
+ *	Each kobject can be tagged with exactly one namespace type
+ *	(i.e. network or user).  Return the ns_type associated with
+ *	this object if any
+ */
+static enum kobj_ns_type sysfs_read_ns_type(struct kobject *kobj)
+{
+	const struct kobj_ns_type_operations *ops;
+	enum kobj_ns_type type;
+
+	ops = kobj_child_ns_ops(kobj);
+	if (!ops)
+		return KOBJ_NS_TYPE_NONE;
+
+	type = ops->type;
+	BUG_ON(type <= KOBJ_NS_TYPE_NONE);
+	BUG_ON(type >= KOBJ_NS_TYPES);
+	BUG_ON(!kobj_ns_type_registered(type));
+
+	return type;
 }
 
 /**
@@ -610,7 +645,9 @@
  */
 int sysfs_create_dir(struct kobject * kobj)
 {
+	enum kobj_ns_type type;
 	struct sysfs_dirent *parent_sd, *sd;
+	const void *ns = NULL;
 	int error = 0;
 
 	BUG_ON(!kobj);
@@ -620,7 +657,11 @@
 	else
 		parent_sd = &sysfs_root;
 
-	error = create_dir(kobj, parent_sd, kobject_name(kobj), &sd);
+	if (sysfs_ns_type(parent_sd))
+		ns = kobj->ktype->namespace(kobj);
+	type = sysfs_read_ns_type(kobj);
+
+	error = create_dir(kobj, parent_sd, type, ns, kobject_name(kobj), &sd);
 	if (!error)
 		kobj->sd = sd;
 	return error;
@@ -630,13 +671,19 @@
 				struct nameidata *nd)
 {
 	struct dentry *ret = NULL;
-	struct sysfs_dirent *parent_sd = dentry->d_parent->d_fsdata;
+	struct dentry *parent = dentry->d_parent;
+	struct sysfs_dirent *parent_sd = parent->d_fsdata;
 	struct sysfs_dirent *sd;
 	struct inode *inode;
+	enum kobj_ns_type type;
+	const void *ns;
 
 	mutex_lock(&sysfs_mutex);
 
-	sd = sysfs_find_dirent(parent_sd, dentry->d_name.name);
+	type = sysfs_ns_type(parent_sd);
+	ns = sysfs_info(dir->i_sb)->ns[type];
+
+	sd = sysfs_find_dirent(parent_sd, ns, dentry->d_name.name);
 
 	/* no such entry */
 	if (!sd) {
@@ -735,7 +782,8 @@
 }
 
 int sysfs_rename(struct sysfs_dirent *sd,
-	struct sysfs_dirent *new_parent_sd, const char *new_name)
+	struct sysfs_dirent *new_parent_sd, const void *new_ns,
+	const char *new_name)
 {
 	const char *dup_name = NULL;
 	int error;
@@ -743,12 +791,12 @@
 	mutex_lock(&sysfs_mutex);
 
 	error = 0;
-	if ((sd->s_parent == new_parent_sd) &&
+	if ((sd->s_parent == new_parent_sd) && (sd->s_ns == new_ns) &&
 	    (strcmp(sd->s_name, new_name) == 0))
 		goto out;	/* nothing to rename */
 
 	error = -EEXIST;
-	if (sysfs_find_dirent(new_parent_sd, new_name))
+	if (sysfs_find_dirent(new_parent_sd, new_ns, new_name))
 		goto out;
 
 	/* rename sysfs_dirent */
@@ -770,6 +818,7 @@
 		sd->s_parent = new_parent_sd;
 		sysfs_link_sibling(sd);
 	}
+	sd->s_ns = new_ns;
 
 	error = 0;
  out:
@@ -780,19 +829,28 @@
 
 int sysfs_rename_dir(struct kobject *kobj, const char *new_name)
 {
-	return sysfs_rename(kobj->sd, kobj->sd->s_parent, new_name);
+	struct sysfs_dirent *parent_sd = kobj->sd->s_parent;
+	const void *new_ns = NULL;
+
+	if (sysfs_ns_type(parent_sd))
+		new_ns = kobj->ktype->namespace(kobj);
+
+	return sysfs_rename(kobj->sd, parent_sd, new_ns, new_name);
 }
 
 int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent_kobj)
 {
 	struct sysfs_dirent *sd = kobj->sd;
 	struct sysfs_dirent *new_parent_sd;
+	const void *new_ns = NULL;
 
 	BUG_ON(!sd->s_parent);
+	if (sysfs_ns_type(sd->s_parent))
+		new_ns = kobj->ktype->namespace(kobj);
 	new_parent_sd = new_parent_kobj && new_parent_kobj->sd ?
 		new_parent_kobj->sd : &sysfs_root;
 
-	return sysfs_rename(sd, new_parent_sd, sd->s_name);
+	return sysfs_rename(sd, new_parent_sd, new_ns, sd->s_name);
 }
 
 /* Relationship between s_mode and the DT_xxx types */
@@ -807,32 +865,35 @@
 	return 0;
 }
 
-static struct sysfs_dirent *sysfs_dir_pos(struct sysfs_dirent *parent_sd,
-	ino_t ino, struct sysfs_dirent *pos)
+static struct sysfs_dirent *sysfs_dir_pos(const void *ns,
+	struct sysfs_dirent *parent_sd,	ino_t ino, struct sysfs_dirent *pos)
 {
 	if (pos) {
 		int valid = !(pos->s_flags & SYSFS_FLAG_REMOVED) &&
 			pos->s_parent == parent_sd &&
 			ino == pos->s_ino;
 		sysfs_put(pos);
-		if (valid)
-			return pos;
+		if (!valid)
+			pos = NULL;
 	}
-	pos = NULL;
-	if ((ino > 1) && (ino < INT_MAX)) {
+	if (!pos && (ino > 1) && (ino < INT_MAX)) {
 		pos = parent_sd->s_dir.children;
 		while (pos && (ino > pos->s_ino))
 			pos = pos->s_sibling;
 	}
+	while (pos && pos->s_ns && pos->s_ns != ns)
+		pos = pos->s_sibling;
 	return pos;
 }
 
-static struct sysfs_dirent *sysfs_dir_next_pos(struct sysfs_dirent *parent_sd,
-	ino_t ino, struct sysfs_dirent *pos)
+static struct sysfs_dirent *sysfs_dir_next_pos(const void *ns,
+	struct sysfs_dirent *parent_sd,	ino_t ino, struct sysfs_dirent *pos)
 {
-	pos = sysfs_dir_pos(parent_sd, ino, pos);
+	pos = sysfs_dir_pos(ns, parent_sd, ino, pos);
 	if (pos)
 		pos = pos->s_sibling;
+	while (pos && pos->s_ns && pos->s_ns != ns)
+		pos = pos->s_sibling;
 	return pos;
 }
 
@@ -841,8 +902,13 @@
 	struct dentry *dentry = filp->f_path.dentry;
 	struct sysfs_dirent * parent_sd = dentry->d_fsdata;
 	struct sysfs_dirent *pos = filp->private_data;
+	enum kobj_ns_type type;
+	const void *ns;
 	ino_t ino;
 
+	type = sysfs_ns_type(parent_sd);
+	ns = sysfs_info(dentry->d_sb)->ns[type];
+
 	if (filp->f_pos == 0) {
 		ino = parent_sd->s_ino;
 		if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0)
@@ -857,9 +923,9 @@
 			filp->f_pos++;
 	}
 	mutex_lock(&sysfs_mutex);
-	for (pos = sysfs_dir_pos(parent_sd, filp->f_pos, pos);
+	for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
 	     pos;
-	     pos = sysfs_dir_next_pos(parent_sd, filp->f_pos, pos)) {
+	     pos = sysfs_dir_next_pos(ns, parent_sd, filp->f_pos, pos)) {
 		const char * name;
 		unsigned int type;
 		int len, ret;
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index e222b25..1beaa73 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -478,9 +478,12 @@
 	mutex_lock(&sysfs_mutex);
 
 	if (sd && dir)
-		sd = sysfs_find_dirent(sd, dir);
+		/* Only directories are tagged, so no need to pass
+		 * a tag explicitly.
+		 */
+		sd = sysfs_find_dirent(sd, NULL, dir);
 	if (sd && attr)
-		sd = sysfs_find_dirent(sd, attr);
+		sd = sysfs_find_dirent(sd, NULL, attr);
 	if (sd)
 		sysfs_notify_dirent(sd);
 
@@ -569,7 +572,7 @@
 	int error;
 
 	if (group)
-		dir_sd = sysfs_get_dirent(kobj->sd, group);
+		dir_sd = sysfs_get_dirent(kobj->sd, NULL, group);
 	else
 		dir_sd = sysfs_get(kobj->sd);
 
@@ -599,7 +602,7 @@
 	mutex_lock(&sysfs_mutex);
 
 	rc = -ENOENT;
-	sd = sysfs_find_dirent(kobj->sd, attr->name);
+	sd = sysfs_find_dirent(kobj->sd, NULL, attr->name);
 	if (!sd)
 		goto out;
 
@@ -624,7 +627,7 @@
 
 void sysfs_remove_file(struct kobject * kobj, const struct attribute * attr)
 {
-	sysfs_hash_and_remove(kobj->sd, attr->name);
+	sysfs_hash_and_remove(kobj->sd, NULL, attr->name);
 }
 
 void sysfs_remove_files(struct kobject * kobj, const struct attribute **ptr)
@@ -646,11 +649,11 @@
 	struct sysfs_dirent *dir_sd;
 
 	if (group)
-		dir_sd = sysfs_get_dirent(kobj->sd, group);
+		dir_sd = sysfs_get_dirent(kobj->sd, NULL, group);
 	else
 		dir_sd = sysfs_get(kobj->sd);
 	if (dir_sd) {
-		sysfs_hash_and_remove(dir_sd, attr->name);
+		sysfs_hash_and_remove(dir_sd, NULL, attr->name);
 		sysfs_put(dir_sd);
 	}
 }
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index fe61194..23c1e59 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -23,7 +23,7 @@
 	int i;
 
 	for (i = 0, attr = grp->attrs; *attr; i++, attr++)
-		sysfs_hash_and_remove(dir_sd, (*attr)->name);
+		sysfs_hash_and_remove(dir_sd, NULL, (*attr)->name);
 }
 
 static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
@@ -39,7 +39,7 @@
 		 * visibility.  Do this by first removing then
 		 * re-adding (if required) the file */
 		if (update)
-			sysfs_hash_and_remove(dir_sd, (*attr)->name);
+			sysfs_hash_and_remove(dir_sd, NULL, (*attr)->name);
 		if (grp->is_visible) {
 			mode = grp->is_visible(kobj, *attr, i);
 			if (!mode)
@@ -132,7 +132,7 @@
 	struct sysfs_dirent *sd;
 
 	if (grp->name) {
-		sd = sysfs_get_dirent(dir_sd, grp->name);
+		sd = sysfs_get_dirent(dir_sd, NULL, grp->name);
 		if (!sd) {
 			WARN(!sd, KERN_WARNING "sysfs group %p not found for "
 				"kobject '%s'\n", grp, kobject_name(kobj));
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index a4a0a941..bbd77e9 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -324,7 +324,7 @@
 	sysfs_put(sd);
 }
 
-int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name)
+int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns, const char *name)
 {
 	struct sysfs_addrm_cxt acxt;
 	struct sysfs_dirent *sd;
@@ -334,7 +334,9 @@
 
 	sysfs_addrm_start(&acxt, dir_sd);
 
-	sd = sysfs_find_dirent(dir_sd, name);
+	sd = sysfs_find_dirent(dir_sd, ns, name);
+	if (sd && (sd->s_ns != ns))
+		sd = NULL;
 	if (sd)
 		sysfs_remove_one(&acxt, sd);
 
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 7761378..281c0c9 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -35,7 +35,7 @@
 struct sysfs_dirent sysfs_root = {
 	.s_name		= "",
 	.s_count	= ATOMIC_INIT(1),
-	.s_flags	= SYSFS_DIR,
+	.s_flags	= SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
 	.s_mode		= S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
 	.s_ino		= 1,
 };
@@ -72,18 +72,107 @@
 	return 0;
 }
 
+static int sysfs_test_super(struct super_block *sb, void *data)
+{
+	struct sysfs_super_info *sb_info = sysfs_info(sb);
+	struct sysfs_super_info *info = data;
+	enum kobj_ns_type type;
+	int found = 1;
+
+	for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) {
+		if (sb_info->ns[type] != info->ns[type])
+			found = 0;
+	}
+	return found;
+}
+
+static int sysfs_set_super(struct super_block *sb, void *data)
+{
+	int error;
+	error = set_anon_super(sb, data);
+	if (!error)
+		sb->s_fs_info = data;
+	return error;
+}
+
 static int sysfs_get_sb(struct file_system_type *fs_type,
 	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
 {
-	return get_sb_single(fs_type, flags, data, sysfs_fill_super, mnt);
+	struct sysfs_super_info *info;
+	enum kobj_ns_type type;
+	struct super_block *sb;
+	int error;
+
+	error = -ENOMEM;
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		goto out;
+
+	for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
+		info->ns[type] = kobj_ns_current(type);
+
+	sb = sget(fs_type, sysfs_test_super, sysfs_set_super, info);
+	if (IS_ERR(sb) || sb->s_fs_info != info)
+		kfree(info);
+	if (IS_ERR(sb)) {
+		error = PTR_ERR(sb);
+		goto out;
+	}
+	if (!sb->s_root) {
+		sb->s_flags = flags;
+		error = sysfs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+		if (error) {
+			deactivate_locked_super(sb);
+			goto out;
+		}
+		sb->s_flags |= MS_ACTIVE;
+	}
+
+	simple_set_mnt(mnt, sb);
+	error = 0;
+out:
+	return error;
+}
+
+static void sysfs_kill_sb(struct super_block *sb)
+{
+	struct sysfs_super_info *info = sysfs_info(sb);
+
+	/* Remove the superblock from fs_supers/s_instances
+	 * so we can't find it, before freeing sysfs_super_info.
+	 */
+	kill_anon_super(sb);
+	kfree(info);
 }
 
 static struct file_system_type sysfs_fs_type = {
 	.name		= "sysfs",
 	.get_sb		= sysfs_get_sb,
-	.kill_sb	= kill_anon_super,
+	.kill_sb	= sysfs_kill_sb,
 };
 
+void sysfs_exit_ns(enum kobj_ns_type type, const void *ns)
+{
+	struct super_block *sb;
+
+	mutex_lock(&sysfs_mutex);
+	spin_lock(&sb_lock);
+	list_for_each_entry(sb, &sysfs_fs_type.fs_supers, s_instances) {
+		struct sysfs_super_info *info = sysfs_info(sb);
+		/*
+		 * If we see a superblock on the fs_supers/s_instances
+		 * list the unmount has not completed and sb->s_fs_info
+		 * points to a valid struct sysfs_super_info.
+		 */
+		/* Ignore superblocks with the wrong ns */
+		if (info->ns[type] != ns)
+			continue;
+		info->ns[type] = NULL;
+	}
+	spin_unlock(&sb_lock);
+	mutex_unlock(&sysfs_mutex);
+}
+
 int __init sysfs_init(void)
 {
 	int err = -ENOMEM;
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 942f239..f71246b 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -58,6 +58,8 @@
 	if (!sd)
 		goto out_put;
 
+	if (sysfs_ns_type(parent_sd))
+		sd->s_ns = target->ktype->namespace(target);
 	sd->s_symlink.target_sd = target_sd;
 	target_sd = NULL;	/* reference is now owned by the symlink */
 
@@ -107,6 +109,26 @@
 }
 
 /**
+ *	sysfs_delete_link - remove symlink in object's directory.
+ *	@kobj:	object we're acting for.
+ *	@targ:	object we're pointing to.
+ *	@name:	name of the symlink to remove.
+ *
+ *	Unlike sysfs_remove_link, sysfs_delete_link has enough information
+ *	to successfully delete symlinks in tagged directories.
+ */
+void sysfs_delete_link(struct kobject *kobj, struct kobject *targ,
+			const char *name)
+{
+	const void *ns = NULL;
+	spin_lock(&sysfs_assoc_lock);
+	if (targ->sd)
+		ns = targ->sd->s_ns;
+	spin_unlock(&sysfs_assoc_lock);
+	sysfs_hash_and_remove(kobj->sd, ns, name);
+}
+
+/**
  *	sysfs_remove_link - remove symlink in object's directory.
  *	@kobj:	object we're acting for.
  *	@name:	name of the symlink to remove.
@@ -121,7 +143,7 @@
 	else
 		parent_sd = kobj->sd;
 
-	sysfs_hash_and_remove(parent_sd, name);
+	sysfs_hash_and_remove(parent_sd, NULL, name);
 }
 
 /**
@@ -137,6 +159,7 @@
 			const char *old, const char *new)
 {
 	struct sysfs_dirent *parent_sd, *sd = NULL;
+	const void *old_ns = NULL, *new_ns = NULL;
 	int result;
 
 	if (!kobj)
@@ -144,8 +167,11 @@
 	else
 		parent_sd = kobj->sd;
 
+	if (targ->sd)
+		old_ns = targ->sd->s_ns;
+
 	result = -ENOENT;
-	sd = sysfs_get_dirent(parent_sd, old);
+	sd = sysfs_get_dirent(parent_sd, old_ns, old);
 	if (!sd)
 		goto out;
 
@@ -155,7 +181,10 @@
 	if (sd->s_symlink.target_sd->s_dir.kobj != targ)
 		goto out;
 
-	result = sysfs_rename(sd, parent_sd, new);
+	if (sysfs_ns_type(parent_sd))
+		new_ns = targ->ktype->namespace(targ);
+
+	result = sysfs_rename(sd, parent_sd, new_ns, new);
 
 out:
 	sysfs_put(sd);
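
sysfs_delete_link() exists because sysfs_remove_link() has no way to recover a
namespace tag once directories are tagged; a caller that still holds the target
kobject can pass it in so the right entry is found. A small illustrative call
site (names hypothetical):

	/* Sketch: drop a cross-reference between two devices.  Since the peer
	 * kobject is still available, its s_ns tag can be looked up, so the
	 * link is removed even from a tagged parent directory. */
	static void example_unlink_peer(struct kobject *parent, struct kobject *peer)
	{
		sysfs_delete_link(parent, peer, "peer");
	}
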
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 30f5a44..6a13105 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -58,6 +58,7 @@
 	struct sysfs_dirent	*s_sibling;
 	const char		*s_name;
 
+	const void		*s_ns; /* namespace tag */
 	union {
 		struct sysfs_elem_dir		s_dir;
 		struct sysfs_elem_symlink	s_symlink;
@@ -81,14 +82,27 @@
 #define SYSFS_COPY_NAME			(SYSFS_DIR | SYSFS_KOBJ_LINK)
 #define SYSFS_ACTIVE_REF		(SYSFS_KOBJ_ATTR | SYSFS_KOBJ_BIN_ATTR)
 
-#define SYSFS_FLAG_MASK			~SYSFS_TYPE_MASK
-#define SYSFS_FLAG_REMOVED		0x0200
+/* identify any namespace tag on sysfs_dirents */
+#define SYSFS_NS_TYPE_MASK		0xff00
+#define SYSFS_NS_TYPE_SHIFT		8
+
+#define SYSFS_FLAG_MASK			~(SYSFS_NS_TYPE_MASK|SYSFS_TYPE_MASK)
+#define SYSFS_FLAG_REMOVED		0x020000
 
 static inline unsigned int sysfs_type(struct sysfs_dirent *sd)
 {
 	return sd->s_flags & SYSFS_TYPE_MASK;
 }
 
+/*
+ * Return any namespace tags on this dirent.
+ * enum kobj_ns_type is defined in linux/kobject.h
+ */
+static inline enum kobj_ns_type sysfs_ns_type(struct sysfs_dirent *sd)
+{
+	return (sd->s_flags & SYSFS_NS_TYPE_MASK) >> SYSFS_NS_TYPE_SHIFT;
+}
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define sysfs_dirent_init_lockdep(sd)				\
 do {								\
@@ -114,6 +128,16 @@
 /*
  * mount.c
  */
+
+/*
+ * Each sb is associated with a set of namespace tags (i.e.
+ * the network namespace of the task which mounted this sysfs
+ * instance).
+ */
+struct sysfs_super_info {
+	const void *ns[KOBJ_NS_TYPES];
+};
+#define sysfs_info(SB) ((struct sysfs_super_info *)(SB->s_fs_info))
 extern struct sysfs_dirent sysfs_root;
 extern struct kmem_cache *sysfs_dir_cachep;
 
@@ -137,8 +161,10 @@
 void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt);
 
 struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
+				       const void *ns,
 				       const unsigned char *name);
 struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
+				      const void *ns,
 				      const unsigned char *name);
 struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type);
 
@@ -149,7 +175,7 @@
 void sysfs_remove_subdir(struct sysfs_dirent *sd);
 
 int sysfs_rename(struct sysfs_dirent *sd,
-	struct sysfs_dirent *new_parent_sd, const char *new_name);
+	struct sysfs_dirent *new_parent_sd, const void *ns, const char *new_name);
 
 static inline struct sysfs_dirent *__sysfs_get(struct sysfs_dirent *sd)
 {
@@ -179,7 +205,7 @@
 int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
 int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
 		size_t size, int flags);
-int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name);
+int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns, const char *name);
 int sysfs_inode_init(void);
 
 /*
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index f0f2a43..3a84455 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -209,6 +209,6 @@
 const struct file_operations udf_dir_operations = {
 	.read			= generic_read_dir,
 	.readdir		= udf_readdir,
-	.ioctl			= udf_ioctl,
+	.unlocked_ioctl		= udf_ioctl,
 	.fsync			= simple_fsync,
 };
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 4b6a46c..baae3a7 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -37,6 +37,7 @@
 #include <linux/quotaops.h>
 #include <linux/buffer_head.h>
 #include <linux/aio.h>
+#include <linux/smp_lock.h>
 
 #include "udf_i.h"
 #include "udf_sb.h"
@@ -144,50 +145,60 @@
 	return retval;
 }
 
-int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
-	      unsigned long arg)
+long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
+	struct inode *inode = filp->f_dentry->d_inode;
 	long old_block, new_block;
 	int result = -EINVAL;
 
+	lock_kernel();
+
 	if (file_permission(filp, MAY_READ) != 0) {
-		udf_debug("no permission to access inode %lu\n",
-			  inode->i_ino);
-		return -EPERM;
+		udf_debug("no permission to access inode %lu\n", inode->i_ino);
+		result = -EPERM;
+		goto out;
 	}
 
 	if (!arg) {
 		udf_debug("invalid argument to udf_ioctl\n");
-		return -EINVAL;
+		result = -EINVAL;
+		goto out;
 	}
 
 	switch (cmd) {
 	case UDF_GETVOLIDENT:
 		if (copy_to_user((char __user *)arg,
 				 UDF_SB(inode->i_sb)->s_volume_ident, 32))
-			return -EFAULT;
+			result = -EFAULT;
 		else
-			return 0;
+			result = 0;
+		goto out;
 	case UDF_RELOCATE_BLOCKS:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EACCES;
-		if (get_user(old_block, (long __user *)arg))
-			return -EFAULT;
+		if (!capable(CAP_SYS_ADMIN)) {
+			result = -EACCES;
+			goto out;
+		}
+		if (get_user(old_block, (long __user *)arg)) {
+			result = -EFAULT;
+			goto out;
+		}
 		result = udf_relocate_blocks(inode->i_sb,
 						old_block, &new_block);
 		if (result == 0)
 			result = put_user(new_block, (long __user *)arg);
-		return result;
+		goto out;
 	case UDF_GETEASIZE:
 		result = put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg);
-		break;
+		goto out;
 	case UDF_GETEABLOCK:
 		result = copy_to_user((char __user *)arg,
 				      UDF_I(inode)->i_ext.i_data,
 				      UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0;
-		break;
+		goto out;
 	}
 
+out:
+	unlock_kernel();
 	return result;
 }
 
@@ -207,7 +218,7 @@
 const struct file_operations udf_file_operations = {
 	.read			= do_sync_read,
 	.aio_read		= generic_file_aio_read,
-	.ioctl			= udf_ioctl,
+	.unlocked_ioctl		= udf_ioctl,
 	.open			= dquot_file_open,
 	.mmap			= generic_file_mmap,
 	.write			= do_sync_write,
@@ -227,7 +238,7 @@
 	if (error)
 		return error;
 
-	if (iattr->ia_valid & ATTR_SIZE)
+	if (is_quota_modification(inode, iattr))
 		dquot_initialize(inode);
 
 	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
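
The fs/udf changes above convert the old .ioctl method (which the VFS called
under the BKL) to .unlocked_ioctl, taking lock_kernel() explicitly and routing
every return through a single unlock path. The same mechanical pattern, as a
hedged sketch for an arbitrary filesystem (command handling omitted):

	static long example_unlocked_ioctl(struct file *filp, unsigned int cmd,
					   unsigned long arg)
	{
		/* the inode is no longer passed in; recover it from the file */
		struct inode *inode = filp->f_path.dentry->d_inode;
		long ret = -ENOTTY;

		lock_kernel();
		switch (cmd) {
		/* real commands would be handled here, setting ret */
		default:
			break;
		}
		unlock_kernel();
		return ret;
	}
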
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 702a114..9079ff7 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -130,8 +130,7 @@
 			uint8_t *, uint8_t *);
 
 /* file.c */
-extern int udf_ioctl(struct inode *, struct file *, unsigned int,
-		     unsigned long);
+extern long udf_ioctl(struct file *, unsigned int, unsigned long);
 extern int udf_setattr(struct dentry *dentry, struct iattr *iattr);
 /* inode.c */
 extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *);
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 80b68c3..cffa756 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -603,7 +603,7 @@
 		if (!inode->i_blocks)
 			inode->i_op = &ufs_fast_symlink_inode_operations;
 		else {
-			inode->i_op = &page_symlink_inode_operations;
+			inode->i_op = &ufs_symlink_inode_operations;
 			inode->i_mapping->a_ops = &ufs_aops;
 		}
 	} else
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 1185562..eabc02e 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -148,7 +148,7 @@
 
 	if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
 		/* slow symlink */
-		inode->i_op = &page_symlink_inode_operations;
+		inode->i_op = &ufs_symlink_inode_operations;
 		inode->i_mapping->a_ops = &ufs_aops;
 		err = page_symlink(inode, symname, l);
 		if (err)
diff --git a/fs/ufs/symlink.c b/fs/ufs/symlink.c
index c0156ed..d283628 100644
--- a/fs/ufs/symlink.c
+++ b/fs/ufs/symlink.c
@@ -42,4 +42,12 @@
 const struct inode_operations ufs_fast_symlink_inode_operations = {
 	.readlink	= generic_readlink,
 	.follow_link	= ufs_follow_link,
+	.setattr	= ufs_setattr,
+};
+
+const struct inode_operations ufs_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= page_follow_link_light,
+	.put_link	= page_put_link,
+	.setattr	= ufs_setattr,
 };
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index d3b6270..f294c44 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -508,7 +508,7 @@
  * - there is no way to know old size
  * - there is no way inform user about error, if it happens in `truncate'
  */
-static int ufs_setattr(struct dentry *dentry, struct iattr *attr)
+int ufs_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = dentry->d_inode;
 	unsigned int ia_valid = attr->ia_valid;
@@ -518,18 +518,18 @@
 	if (error)
 		return error;
 
+	if (is_quota_modification(inode, attr))
+		dquot_initialize(inode);
+
 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
 	    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
 		error = dquot_transfer(inode, attr);
 		if (error)
 			return error;
 	}
-	if (ia_valid & ATTR_SIZE &&
-	    attr->ia_size != i_size_read(inode)) {
+	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
 		loff_t old_i_size = inode->i_size;
 
-		dquot_initialize(inode);
-
 		error = vmtruncate(inode, attr->ia_size);
 		if (error)
 			return error;
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 43f9f5d..179ae6b 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -122,9 +122,11 @@
 
 /* symlink.c */
 extern const struct inode_operations ufs_fast_symlink_inode_operations;
+extern const struct inode_operations ufs_symlink_inode_operations;
 
 /* truncate.c */
 extern int ufs_truncate (struct inode *, loff_t);
+extern int ufs_setattr(struct dentry *dentry, struct iattr *attr);
 
 static inline struct ufs_sb_info *UFS_SB(struct super_block *sb)
 {
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c
index 1947514..e31bf21 100644
--- a/fs/xfs/linux-2.6/xfs_quotaops.c
+++ b/fs/xfs/linux-2.6/xfs_quotaops.c
@@ -97,7 +97,7 @@
 }
 
 STATIC int
-xfs_fs_get_xquota(
+xfs_fs_get_dqblk(
 	struct super_block	*sb,
 	int			type,
 	qid_t			id,
@@ -114,7 +114,7 @@
 }
 
 STATIC int
-xfs_fs_set_xquota(
+xfs_fs_set_dqblk(
 	struct super_block	*sb,
 	int			type,
 	qid_t			id,
@@ -135,6 +135,6 @@
 const struct quotactl_ops xfs_quotactl_operations = {
 	.get_xstate		= xfs_fs_get_xstate,
 	.set_xstate		= xfs_fs_set_xstate,
-	.get_xquota		= xfs_fs_get_xquota,
-	.set_xquota		= xfs_fs_set_xquota,
+	.get_dqblk		= xfs_fs_get_dqblk,
+	.set_dqblk		= xfs_fs_set_dqblk,
 };
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index e900251..f24dbe5 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -725,7 +725,8 @@
 xfs_blkdev_issue_flush(
 	xfs_buftarg_t		*buftarg)
 {
-	blkdev_issue_flush(buftarg->bt_bdev, NULL);
+	blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL,
+			BLKDEV_IFL_WAIT);
 }
 
 STATIC void
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 26fa431..92b002f 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -448,6 +448,9 @@
 	return 0;
 }
 
+#define XFS_DQ_MASK \
+	(FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
+
 /*
  * Adjust quota limits, and start/stop timers accordingly.
  */
@@ -465,9 +468,10 @@
 	int			error;
 	xfs_qcnt_t		hard, soft;
 
-	if ((newlim->d_fieldmask &
-	    (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK|FS_DQ_WARNS_MASK)) == 0)
-		return (0);
+	if (newlim->d_fieldmask & ~XFS_DQ_MASK)
+		return EINVAL;
+	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
+		return 0;
 
 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
 	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index e5f234a..97e807c 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -28,7 +28,8 @@
 KMAP_D(16)	KM_IRQ_PTE,
 KMAP_D(17)	KM_NMI,
 KMAP_D(18)	KM_NMI_PTE,
-KMAP_D(19)	KM_TYPE_NR
+KMAP_D(19)	KM_KDB,
+KMAP_D(20)	KM_TYPE_NR
 };
 
 #undef KMAP_D
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index fc0d575..59c3e5b 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -103,6 +103,23 @@
 	unsigned int blocksize;
 };
 
+struct ablkcipher_walk {
+	struct {
+		struct page *page;
+		unsigned int offset;
+	} src, dst;
+
+	struct scatter_walk	in;
+	unsigned int		nbytes;
+	struct scatter_walk	out;
+	unsigned int		total;
+	struct list_head	buffers;
+	u8			*iv_buffer;
+	u8			*iv;
+	int			flags;
+	unsigned int		blocksize;
+};
+
 extern const struct crypto_type crypto_ablkcipher_type;
 extern const struct crypto_type crypto_aead_type;
 extern const struct crypto_type crypto_blkcipher_type;
@@ -173,6 +190,12 @@
 			      struct blkcipher_walk *walk,
 			      unsigned int blocksize);
 
+int ablkcipher_walk_done(struct ablkcipher_request *req,
+			 struct ablkcipher_walk *walk, int err);
+int ablkcipher_walk_phys(struct ablkcipher_request *req,
+			 struct ablkcipher_walk *walk);
+void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);
+
 static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
 {
 	return PTR_ALIGN(crypto_tfm_ctx(tfm),
@@ -283,6 +306,23 @@
 	walk->total = nbytes;
 }
 
+static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
+					struct scatterlist *dst,
+					struct scatterlist *src,
+					unsigned int nbytes)
+{
+	walk->in.sg = src;
+	walk->out.sg = dst;
+	walk->total = nbytes;
+	INIT_LIST_HEAD(&walk->buffers);
+}
+
+static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
+{
+	if (unlikely(!list_empty(&walk->buffers)))
+		__ablkcipher_walk_complete(walk);
+}
+
 static inline struct crypto_async_request *crypto_get_backlog(
 	struct crypto_queue *queue)
 {
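
The new ablkcipher_walk mirrors the existing blkcipher walk but hands the driver
physical pages, which suits hardware that operates on bus addresses. A hedged
sketch of the loop the declarations above suggest, modeled on the blkcipher
pattern (the per-chunk processing is a placeholder, not a real driver):

	static int example_ablkcipher_encrypt(struct ablkcipher_request *req)
	{
		struct ablkcipher_walk walk;
		int err;

		ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
		err = ablkcipher_walk_phys(req, &walk);

		while (walk.nbytes) {
			/* process walk.src/walk.dst for walk.nbytes bytes here */
			err = ablkcipher_walk_done(req, &walk, 0);
			if (err)
				break;
		}

		/* frees any bounce buffers queued on walk.buffers */
		ablkcipher_walk_complete(&walk);
		return err;
	}
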
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 2f3b3a0..c1b9871 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1428,10 +1428,13 @@
 /* Graphics Execution Manager library functions (drm_gem.c) */
 int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
+void drm_gem_object_release(struct drm_gem_object *obj);
 void drm_gem_object_free(struct kref *kref);
 void drm_gem_object_free_unlocked(struct kref *kref);
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
 					    size_t size);
+int drm_gem_object_init(struct drm_device *dev,
+			struct drm_gem_object *obj, size_t size);
 void drm_gem_object_handle_free(struct kref *kref);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 1347524..93a1a31 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -31,6 +31,7 @@
 #include <linux/idr.h>
 
 #include <linux/fb.h>
+#include <linux/slow-work.h>
 
 struct drm_device;
 struct drm_mode_set;
@@ -271,8 +272,6 @@
 	unsigned int depth;
 	int bits_per_pixel;
 	int flags;
-	struct fb_info *fbdev;
-	u32 pseudo_palette[17];
 	struct list_head filp_head;
 	/* if you are using the helper */
 	void *helper_private;
@@ -369,9 +368,6 @@
  * @enabled: is this CRTC enabled?
  * @x: x position on screen
  * @y: y position on screen
- * @desired_mode: new desired mode
- * @desired_x: desired x for desired_mode
- * @desired_y: desired y for desired_mode
  * @funcs: CRTC control functions
  *
  * Each CRTC may have one or more connectors associated with it.  This structure
@@ -391,8 +387,6 @@
 	struct drm_display_mode mode;
 
 	int x, y;
-	struct drm_display_mode *desired_mode;
-	int desired_x, desired_y;
 	const struct drm_crtc_funcs *funcs;
 
 	/* CRTC gamma size for reporting to userspace */
@@ -467,6 +461,15 @@
 	DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
 };
 
+/* should we poll this connector for connects and disconnects */
+/* hot plug detectable */
+#define DRM_CONNECTOR_POLL_HPD (1 << 0)
+/* poll for connections */
+#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
+/* can cleanly poll for disconnections without flickering the screen */
+/* DACs should rarely do this without a lot of testing */
+#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
+
 /**
  * drm_connector - central DRM connector control structure
  * @crtc: CRTC this connector is currently connected to, NULL if none
@@ -511,6 +514,8 @@
 	u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY];
 	uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
 
+	uint8_t polled; /* DRM_CONNECTOR_POLL_* */
+
 	/* requested DPMS state */
 	int dpms;
 
@@ -521,7 +526,6 @@
 	uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
 	uint32_t force_encoder_id;
 	struct drm_encoder *encoder; /* currently active encoder */
-	void *fb_helper_private;
 };
 
 /**
@@ -548,16 +552,10 @@
 
 /**
  * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
- * @resize: adjust CRTCs as necessary for the proposed layout
- *
- * Currently only a resize hook is available.  DRM will call back into the
- * driver with a new screen width and height.  If the driver can't support
- * the proposed size, it can return false.  Otherwise it should adjust
- * the CRTC<->connector mappings as needed and update its view of the screen.
  */
 struct drm_mode_config_funcs {
 	struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd);
-	int (*fb_changed)(struct drm_device *dev);
+	void (*output_poll_changed)(struct drm_device *dev);
 };
 
 struct drm_mode_group {
@@ -590,14 +588,15 @@
 
 	struct list_head property_list;
 
-	/* in-kernel framebuffers - hung of filp_head in drm_framebuffer */
-	struct list_head fb_kernel_list;
-
 	int min_width, min_height;
 	int max_width, max_height;
 	struct drm_mode_config_funcs *funcs;
 	resource_size_t fb_base;
 
+	/* output poll support */
+	bool poll_enabled;
+	struct delayed_slow_work output_poll_slow_work;
+
 	/* pointers to standard properties */
 	struct list_head property_blob_list;
 	struct drm_property *edid_property;
@@ -666,8 +665,6 @@
 extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
 extern struct edid *drm_get_edid(struct drm_connector *connector,
 				 struct i2c_adapter *adapter);
-extern int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
-				 unsigned char *buf, int len);
 extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
 extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
@@ -799,8 +796,14 @@
 extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
 				int hdisplay, int vdisplay, int vrefresh,
 				bool interlaced, int margins);
+extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
+				int hdisplay, int vdisplay, int vrefresh,
+				bool interlaced, int margins, int GTF_M,
+				int GTF_2C, int GTF_K, int GTF_2J);
 extern int drm_add_modes_noedid(struct drm_connector *connector,
 				int hdisplay, int vdisplay);
 
 extern bool drm_edid_is_valid(struct edid *edid);
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+					   int hsize, int vsize, int fresh);
 #endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index b29e201..dc5873c 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -39,7 +39,6 @@
 
 #include <linux/fb.h>
 
-#include "drm_fb_helper.h"
 struct drm_crtc_helper_funcs {
 	/*
 	 * Control power levels on the CRTC.  If the mode passed in is
@@ -96,8 +95,6 @@
 
 extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
 extern void drm_helper_disable_unused_functions(struct drm_device *dev);
-extern int drm_helper_hotplug_stage_two(struct drm_device *dev);
-extern bool drm_helper_initial_config(struct drm_device *dev);
 extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
 extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 				     struct drm_display_mode *mode,
@@ -123,12 +120,14 @@
 	encoder->helper_private = (void *)funcs;
 }
 
-static inline int drm_connector_helper_add(struct drm_connector *connector,
+static inline void drm_connector_helper_add(struct drm_connector *connector,
 					    const struct drm_connector_helper_funcs *funcs)
 {
 	connector->helper_private = (void *)funcs;
-	return drm_fb_helper_add_connector(connector);
 }
 
 extern int drm_helper_resume_force_mode(struct drm_device *dev);
+extern void drm_kms_helper_poll_init(struct drm_device *dev);
+extern void drm_kms_helper_poll_fini(struct drm_device *dev);
+extern void drm_helper_hpd_irq_event(struct drm_device *dev);
 #endif
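
With the fbdev/hotplug rework, drivers opt into output polling per connector and
start the poll worker explicitly; connectors with a working hotplug pin report
through drm_helper_hpd_irq_event() instead, and fbdev refresh is hooked via the
new output_poll_changed callback. An illustrative setup sketch (the connector
variables are hypothetical):

	static void example_output_setup(struct drm_device *dev,
					 struct drm_connector *vga,
					 struct drm_connector *dvi)
	{
		/* analog DAC: poll for connects; disconnect polling needs care,
		 * per the comment in drm_crtc.h above */
		vga->polled = DRM_CONNECTOR_POLL_CONNECT;
		/* digital output with a reliable hotplug line */
		dvi->polled = DRM_CONNECTOR_POLL_HPD;

		drm_kms_helper_poll_init(dev);
	}
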
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index b420989..39e2cc5 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -120,7 +120,7 @@
 		struct detailed_data_string str;
 		struct detailed_data_monitor_range range;
 		struct detailed_data_wpindex color;
-		struct std_timing timings[5];
+		struct std_timing timings[6];
 		struct cvt_timing cvt[4];
 	} data;
 } __attribute__((packed));
@@ -201,7 +201,4 @@
 
 #define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
 
-/* define the number of Extension EDID block */
-#define DRM_MAX_EDID_EXT_NUM 4
-
 #endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 58c892a..f0a6afc 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -30,17 +30,12 @@
 #ifndef DRM_FB_HELPER_H
 #define DRM_FB_HELPER_H
 
+struct drm_fb_helper;
+
 struct drm_fb_helper_crtc {
 	uint32_t crtc_id;
 	struct drm_mode_set mode_set;
-};
-
-
-struct drm_fb_helper_funcs {
-	void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
-			  u16 blue, int regno);
-	void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
-			  u16 *blue, int regno);
+	struct drm_display_mode *desired_mode;
 };
 
 /* mode specified on the command line */
@@ -57,8 +52,28 @@
 	bool margins;
 };
 
+struct drm_fb_helper_surface_size {
+	u32 fb_width;
+	u32 fb_height;
+	u32 surface_width;
+	u32 surface_height;
+	u32 surface_bpp;
+	u32 surface_depth;
+};
+
+struct drm_fb_helper_funcs {
+	void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
+			  u16 blue, int regno);
+	void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
+			  u16 *blue, int regno);
+
+	int (*fb_probe)(struct drm_fb_helper *helper,
+			struct drm_fb_helper_surface_size *sizes);
+};
+
 struct drm_fb_helper_connector {
 	struct drm_fb_helper_cmdline_mode cmdline_mode;
+	struct drm_connector *connector;
 };
 
 struct drm_fb_helper {
@@ -67,24 +82,26 @@
 	struct drm_display_mode *mode;
 	int crtc_count;
 	struct drm_fb_helper_crtc *crtc_info;
+	int connector_count;
+	struct drm_fb_helper_connector **connector_info;
 	struct drm_fb_helper_funcs *funcs;
 	int conn_limit;
+	struct fb_info *fbdev;
+	u32 pseudo_palette[17];
 	struct list_head kernel_fb_list;
+
+	/* we got a hotplug but fbdev wasn't running the console;
+	   delay until next set_par */
+	bool delayed_hotplug;
 };
 
-int drm_fb_helper_single_fb_probe(struct drm_device *dev,
-				  int preferred_bpp,
-				  int (*fb_create)(struct drm_device *dev,
-						   uint32_t fb_width,
-						   uint32_t fb_height,
-						   uint32_t surface_width,
-						   uint32_t surface_height,
-						   uint32_t surface_depth,
-						   uint32_t surface_bpp,
-						   struct drm_framebuffer **fb_ptr));
-int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count,
-				  int max_conn);
-void drm_fb_helper_free(struct drm_fb_helper *helper);
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper,
+				  int preferred_bpp);
+
+int drm_fb_helper_init(struct drm_device *dev,
+		       struct drm_fb_helper *helper, int crtc_count,
+		       int max_conn);
+void drm_fb_helper_fini(struct drm_fb_helper *helper);
 int drm_fb_helper_blank(int blank, struct fb_info *info);
 int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
 			      struct fb_info *info);
@@ -99,13 +116,15 @@
 			    struct fb_info *info);
 
 void drm_fb_helper_restore(void);
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
 			    uint32_t fb_width, uint32_t fb_height);
 void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
 			    uint32_t depth);
 
-int drm_fb_helper_add_connector(struct drm_connector *connector);
-int drm_fb_helper_parse_command_line(struct drm_device *dev);
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
+bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
+
 #endif
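
The fb helper now owns the fbdev info, palette and per-connector state, and is
driven through an explicit init/config sequence rather than
drm_helper_initial_config(). A hedged sketch of the driver-side flow implied by
the new prototypes (the embedding structure, my_fb_funcs and its .fb_probe hook
are hypothetical, and error handling is trimmed):

	static int example_fbdev_init(struct drm_device *dev,
				      struct drm_fb_helper *helper)
	{
		int ret;

		helper->funcs = &my_fb_funcs;	/* supplies .fb_probe/.gamma_* */

		ret = drm_fb_helper_init(dev, helper,
					 dev->mode_config.num_crtc, 4 /* max conn */);
		if (ret)
			return ret;

		drm_fb_helper_single_add_all_connectors(helper);
		drm_fb_helper_initial_config(helper, 32 /* preferred bpp */);
		return 0;
	}
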
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/include/drm/drm_fixed.h
similarity index 60%
rename from drivers/gpu/drm/radeon/radeon_fixed.h
rename to include/drm/drm_fixed.h
index 3d4d84e..4a08a66 100644
--- a/drivers/gpu/drm/radeon/radeon_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -21,41 +21,41 @@
  *
  * Authors: Dave Airlie
  */
-#ifndef RADEON_FIXED_H
-#define RADEON_FIXED_H
+#ifndef DRM_FIXED_H
+#define DRM_FIXED_H
 
-typedef union rfixed {
+typedef union dfixed {
 	u32 full;
 } fixed20_12;
 
 
-#define rfixed_const(A) (u32)(((A) << 12))/*  + ((B + 0.000122)*4096)) */
-#define rfixed_const_half(A) (u32)(((A) << 12) + 2048)
-#define rfixed_const_666(A) (u32)(((A) << 12) + 2731)
-#define rfixed_const_8(A) (u32)(((A) << 12) + 3277)
-#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
-#define fixed_init(A) { .full = rfixed_const((A)) }
-#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
-#define rfixed_trunc(A) ((A).full >> 12)
+#define dfixed_const(A) (u32)(((A) << 12))/*  + ((B + 0.000122)*4096)) */
+#define dfixed_const_half(A) (u32)(((A) << 12) + 2048)
+#define dfixed_const_666(A) (u32)(((A) << 12) + 2731)
+#define dfixed_const_8(A) (u32)(((A) << 12) + 3277)
+#define dfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
+#define dfixed_init(A) { .full = dfixed_const((A)) }
+#define dfixed_init_half(A) { .full = dfixed_const_half((A)) }
+#define dfixed_trunc(A) ((A).full >> 12)
 
-static inline u32 rfixed_floor(fixed20_12 A)
+static inline u32 dfixed_floor(fixed20_12 A)
 {
-	u32 non_frac = rfixed_trunc(A);
+	u32 non_frac = dfixed_trunc(A);
 
-	return rfixed_const(non_frac);
+	return dfixed_const(non_frac);
 }
 
-static inline u32 rfixed_ceil(fixed20_12 A)
+static inline u32 dfixed_ceil(fixed20_12 A)
 {
-	u32 non_frac = rfixed_trunc(A);
+	u32 non_frac = dfixed_trunc(A);
 
-	if (A.full > rfixed_const(non_frac))
-		return rfixed_const(non_frac + 1);
+	if (A.full > dfixed_const(non_frac))
+		return dfixed_const(non_frac + 1);
 	else
-		return rfixed_const(non_frac);
+		return dfixed_const(non_frac);
 }
 
-static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
+static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
 {
 	u64 tmp = ((u64)A.full << 13);
 
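
The renamed helpers are plain 20.12 fixed point: dfixed_const() shifts an
integer up by 12 bits and dfixed_trunc() shifts it back down. A small worked
example using only the macros defined above:

	/* 2.5 * 3.0 == 7.5 in 20.12 fixed point */
	static u32 example_fixed_mul(void)
	{
		fixed20_12 a = dfixed_init_half(2);	/* 2.5 -> (2 << 12) + 2048 */
		fixed20_12 b = dfixed_init(3);		/* 3.0 -> 3 << 12 */
		fixed20_12 c;

		c.full = dfixed_mul(a, b);		/* 7.5 -> 30720 */
		return dfixed_trunc(c);			/* 7; dfixed_ceil(c) is 8 << 12 */
	}
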
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 81e614b..3ff9fc0 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -902,6 +902,7 @@
 #define RADEON_INFO_NUM_GB_PIPES	0x01
 #define RADEON_INFO_NUM_Z_PIPES 	0x02
 #define RADEON_INFO_ACCEL_WORKING	0x03
+#define RADEON_INFO_CRTC_FROM_ID	0x04
 
 struct drm_radeon_info {
 	uint32_t		request;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 81eb9f4..267a86c 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -66,6 +66,26 @@
 	const uint32_t	*busy_placement;
 };
 
+/**
+ * struct ttm_bus_placement
+ *
+ * @addr:		mapped virtual address
+ * @base:		bus base address
+ * @is_iomem:		is this io memory?
+ * @size:		size in bytes
+ * @offset:		offset from the base address
+ *
+ * Structure indicating the bus placement of an object.
+ */
+struct ttm_bus_placement {
+	void		*addr;
+	unsigned long	base;
+	unsigned long	size;
+	unsigned long	offset;
+	bool		is_iomem;
+	bool		io_reserved;
+};
+
 
 /**
  * struct ttm_mem_reg
@@ -75,6 +95,7 @@
  * @num_pages: Actual size of memory region in pages.
  * @page_alignment: Page alignment.
  * @placement: Placement flags.
+ * @bus: Placement on io bus accessible to the CPU
  *
  * Structure indicating the placement and space resources used by a
  * buffer object.
@@ -87,6 +108,7 @@
 	uint32_t page_alignment;
 	uint32_t mem_type;
 	uint32_t placement;
+	struct ttm_bus_placement bus;
 };
 
 /**
@@ -274,6 +296,7 @@
 		ttm_bo_map_kmap         = 3,
 		ttm_bo_map_premapped    = 4 | TTM_BO_MAP_IOMEM_MASK,
 	} bo_kmap_type;
+	struct ttm_buffer_object *bo;
 };
 
 /**
@@ -313,7 +336,8 @@
  * @bo: The buffer object.
  * @placement: Proposed placement for the buffer object.
  * @interruptible: Sleep interruptible if sleeping.
- * @no_wait: Return immediately if the buffer is busy.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Changes placement and caching policy of the buffer object
  * according proposed placement.
@@ -325,7 +349,8 @@
  */
 extern int ttm_bo_validate(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement,
-				bool interruptible, bool no_wait);
+				bool interruptible, bool no_wait_reserve,
+				bool no_wait_gpu);
 
 /**
  * ttm_bo_unref
@@ -337,6 +362,23 @@
 extern void ttm_bo_unref(struct ttm_buffer_object **bo);
 
 /**
+ * ttm_bo_lock_delayed_workqueue
+ *
+ * Prevent the delayed workqueue from running.
+ * Returns
+ * True if the workqueue was queued at the time
+ */
+extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_unlock_delayed_workqueue
+ *
+ * Allows the delayed workqueue to run.
+ */
+extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
+					    int resched);
+
+/**
  * ttm_bo_synccpu_write_grab
  *
  * @bo: The buffer object:
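
ttm_bo_validate() and the move paths in ttm_bo_driver.h below now distinguish
waiting on another reservation from waiting on the GPU. An illustrative caller
with the new argument order (bo and placement assumed to be set up elsewhere):

	static int example_pin(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
	{
		/* old: ttm_bo_validate(bo, placement, interruptible, no_wait); */
		return ttm_bo_validate(bo, placement,
				       true,	/* interruptible */
				       false,	/* no_wait_reserve */
				       false);	/* no_wait_gpu */
	}
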
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6b9db91..0ea602d 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -176,8 +176,6 @@
 
 #define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
 #define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
-#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2)	/* Fixed memory needs ioremap
-						   before kernel access. */
 #define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */
 
 /**
@@ -189,13 +187,6 @@
  * managed by this memory type.
  * @gpu_offset: If used, the GPU offset of the first managed page of
  * fixed memory or the first managed location in an aperture.
- * @io_offset: The io_offset of the first managed page of IO memory or
- * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
- * memory, this should be set to NULL.
- * @io_size: The size of a managed IO region (fixed memory or aperture).
- * @io_addr: Virtual kernel address if the io region is pre-mapped. For
- * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
- * @io_addr should be set to NULL.
  * @size: Size of the managed region.
  * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
  * as defined in ttm_placement_common.h
@@ -221,9 +212,6 @@
 	bool use_type;
 	uint32_t flags;
 	unsigned long gpu_offset;
-	unsigned long io_offset;
-	unsigned long io_size;
-	void *io_addr;
 	uint64_t size;
 	uint32_t available_caching;
 	uint32_t default_caching;
@@ -311,7 +299,8 @@
 	 */
 	int (*move) (struct ttm_buffer_object *bo,
 		     bool evict, bool interruptible,
-		     bool no_wait, struct ttm_mem_reg *new_mem);
+		     bool no_wait_reserve, bool no_wait_gpu,
+		     struct ttm_mem_reg *new_mem);
 
 	/**
 	 * struct ttm_bo_driver_member verify_access
@@ -351,12 +340,21 @@
 			    struct ttm_mem_reg *new_mem);
 	/* notify the driver we are taking a fault on this BO
 	 * and have reserved it */
-	void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
 
 	/**
 	 * notify the driver that we're about to swap out this bo
 	 */
 	void (*swap_notify) (struct ttm_buffer_object *bo);
+
+	/**
+	 * Driver callback invoked when mapping io memory (for bo_move_memcpy,
+	 * for instance). TTM will take care to call io_mem_free whenever
+	 * the mapping is no longer in use. io_mem_reserve & io_mem_free
+	 * are balanced.
+	 */
+	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
 };
 
 /**
@@ -633,7 +631,8 @@
  * @proposed_placement: Proposed new placement for the buffer object.
  * @mem: A struct ttm_mem_reg.
  * @interruptible: Sleep interruptible when sliping.
- * @no_wait: Don't sleep waiting for space to become available.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Allocate memory space for the buffer object pointed to by @bo, using
  * the placement flags in @mem, potentially evicting other idle buffer objects.
@@ -647,7 +646,8 @@
 extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement,
 				struct ttm_mem_reg *mem,
-				bool interruptible, bool no_wait);
+				bool interruptible,
+				bool no_wait_reserve, bool no_wait_gpu);
 /**
  * ttm_bo_wait_for_cpu
  *
@@ -682,6 +682,11 @@
 			     unsigned long *bus_offset,
 			     unsigned long *bus_size);
 
+extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+				struct ttm_mem_reg *mem);
+extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
+				struct ttm_mem_reg *mem);
+
 extern void ttm_bo_global_release(struct ttm_global_reference *ref);
 extern int ttm_bo_global_init(struct ttm_global_reference *ref);
 
@@ -798,7 +803,8 @@
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
  * Optimized move function for a buffer object with both old and
@@ -812,15 +818,16 @@
  */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-			   bool evict, bool no_wait,
-			   struct ttm_mem_reg *new_mem);
+			   bool evict, bool no_wait_reserve,
+			   bool no_wait_gpu, struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_move_memcpy
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
  * Fallback move function for a mappable buffer object in mappable memory.
@@ -834,8 +841,8 @@
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-			      bool evict,
-			      bool no_wait, struct ttm_mem_reg *new_mem);
+			      bool evict, bool no_wait_reserve,
+			      bool no_wait_gpu, struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_free_old_node
@@ -854,7 +861,8 @@
  * @sync_obj_arg: An argument to pass to the sync object idle / wait
  * functions.
  * @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
  * Accelerated move function to be called when an accelerated move
@@ -868,7 +876,8 @@
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 				     void *sync_obj,
 				     void *sync_obj_arg,
-				     bool evict, bool no_wait,
+				     bool evict, bool no_wait_reserve,
+				     bool no_wait_gpu,
 				     struct ttm_mem_reg *new_mem);
 /**
  * ttm_io_prot
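
The io_mem_reserve/io_mem_free hooks added above are provided by the driver and
are called in balanced pairs by TTM. A minimal sketch (not part of this patch)
of how a driver might wire them up; the mydrv_* helpers are hypothetical
placeholders:

	static int mydrv_io_mem_reserve(struct ttm_bo_device *bdev,
					struct ttm_mem_reg *mem)
	{
		/* make the aperture range backing 'mem' CPU-accessible;
		 * TTM balances this call with io_mem_free() below */
		return mydrv_map_aperture(bdev, mem);
	}

	static void mydrv_io_mem_free(struct ttm_bo_device *bdev,
				      struct ttm_mem_reg *mem)
	{
		mydrv_unmap_aperture(bdev, mem);
	}

	static struct ttm_bo_driver mydrv_bo_driver = {
		/* ... existing hooks (move, fault_reserve_notify, ...) ... */
		.io_mem_reserve	= mydrv_io_mem_reserve,
		.io_mem_free	= mydrv_io_mem_free,
	};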
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
new file mode 100644
index 0000000..8bb4de5
--- /dev/null
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Jerome Glisse <jglisse@redhat.com>
+ */
+#ifndef TTM_PAGE_ALLOC
+#define TTM_PAGE_ALLOC
+
+#include "ttm_bo_driver.h"
+#include "ttm_memory.h"
+
+/**
+ * Get count number of pages from pool to pages list.
+ *
+ * @pages: head of an empty linked list where pages are filled.
+ * @flags: ttm flags for page allocation.
+ * @cstate: ttm caching state for the page.
+ * @count: number of pages to allocate.
+ */
+int ttm_get_pages(struct list_head *pages,
+		  int flags,
+		  enum ttm_caching_state cstate,
+		  unsigned count);
+/**
+ * Put linked list of pages to pool.
+ *
+ * @pages: list of pages to free.
+ * @page_count: number of pages in the list. Zero can be passed for unknown
+ * count.
+ * @flags: ttm flags for page allocation.
+ * @cstate: ttm caching state.
+ */
+void ttm_put_pages(struct list_head *pages,
+		   unsigned page_count,
+		   int flags,
+		   enum ttm_caching_state cstate);
+/**
+ * Initialize pool allocator.
+ *
+ * The pool allocator is internally reference counted, so it can be initialized
+ * multiple times, but ttm_page_alloc_fini has to be called the same number of
+ * times.
+ */
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+/**
+ * Free pool allocator.
+ */
+void ttm_page_alloc_fini(void);
+
+/**
+ * Output the state of pools to debugfs file
+ */
+extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+#endif
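
A minimal usage sketch (not from this patch) for the pool allocator declared
above; flags, cstate and npages are assumed to come from the caller's ttm_tt
state:

	LIST_HEAD(pages);
	int ret;

	ret = ttm_get_pages(&pages, flags, cstate, npages);
	if (ret)
		return ret;
	/* ... hand the pages to the ttm ... */
	ttm_put_pages(&pages, npages, flags, cstate);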
diff --git a/include/linux/altera_jtaguart.h b/include/linux/altera_jtaguart.h
new file mode 100644
index 0000000..953b178
--- /dev/null
+++ b/include/linux/altera_jtaguart.h
@@ -0,0 +1,16 @@
+/*
+ * altera_jtaguart.h -- Altera JTAG UART driver defines.
+ */
+
+#ifndef	__ALTJUART_H
+#define	__ALTJUART_H
+
+#define ALTERA_JTAGUART_MAJOR	204
+#define ALTERA_JTAGUART_MINOR	186
+
+struct altera_jtaguart_platform_uart {
+	unsigned long mapbase;	/* Physical address base */
+	unsigned int irq;	/* Interrupt vector */
+};
+
+#endif /* __ALTJUART_H */
diff --git a/include/linux/altera_uart.h b/include/linux/altera_uart.h
new file mode 100644
index 0000000..8d44106
--- /dev/null
+++ b/include/linux/altera_uart.h
@@ -0,0 +1,14 @@
+/*
+ * altera_uart.h -- Altera UART driver defines.
+ */
+
+#ifndef	__ALTUART_H
+#define	__ALTUART_H
+
+struct altera_uart_platform_uart {
+	unsigned long mapbase;	/* Physical address base */
+	unsigned int irq;	/* Interrupt vector */
+	unsigned int uartclk;	/* UART clock rate */
+};
+
+#endif /* __ALTUART_H */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index bd0e3c6..e6e0cb5 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/sched.h>
+#include <linux/timer.h>
 #include <linux/writeback.h>
 #include <asm/atomic.h>
 
@@ -88,6 +89,8 @@
 
 	struct device *dev;
 
+	struct timer_list laptop_mode_wb_timer;
+
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *debug_dir;
 	struct dentry *debug_stats;
@@ -103,9 +106,10 @@
 void bdi_unregister(struct backing_dev_info *bdi);
 int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
 void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
-				long nr_pages);
+				long nr_pages, int sb_locked);
 int bdi_writeback_task(struct bdi_writeback *wb);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
+void bdi_arm_supers_timer(void);
 
 extern spinlock_t bdi_lock;
 extern struct list_head bdi_list;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 413284a5..8b7f5e0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -186,15 +186,19 @@
 	};
 
 	/*
-	 * two pointers are available for the IO schedulers, if they need
+	 * Three pointers are available for the IO schedulers, if they need
 	 * more they have to dynamically allocate it.
 	 */
 	void *elevator_private;
 	void *elevator_private2;
+	void *elevator_private3;
 
 	struct gendisk *rq_disk;
 	unsigned long start_time;
-
+#ifdef CONFIG_BLK_CGROUP
+	unsigned long long start_time_ns;
+	unsigned long long io_start_time_ns;    /* when passed to hardware */
+#endif
 	/* Number of scatter-gather DMA addr+len pairs after
 	 * physical address coalescing is performed.
 	 */
@@ -917,7 +921,12 @@
  */
 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
 					spinlock_t *lock, int node_id);
+extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
+							   request_fn_proc *,
+							   spinlock_t *, int node_id);
 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
+						      request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
@@ -994,20 +1003,25 @@
 		return NULL;
 	return bqt->tag_index[tag];
 }
-
-extern int blkdev_issue_flush(struct block_device *, sector_t *);
-#define DISCARD_FL_WAIT		0x01	/* wait for completion */
-#define DISCARD_FL_BARRIER	0x02	/* issue DISCARD_BARRIER request */
-extern int blkdev_issue_discard(struct block_device *, sector_t sector,
-		sector_t nr_sects, gfp_t, int flags);
-
+enum {
+	BLKDEV_WAIT,	/* wait for completion */
+	BLKDEV_BARRIER,	/* issue request with barrier */
+};
+#define BLKDEV_IFL_WAIT		(1 << BLKDEV_WAIT)
+#define BLKDEV_IFL_BARRIER	(1 << BLKDEV_BARRIER)
+extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *,
+			unsigned long);
+extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
 static inline int sb_issue_discard(struct super_block *sb,
 				   sector_t block, sector_t nr_blocks)
 {
 	block <<= (sb->s_blocksize_bits - 9);
 	nr_blocks <<= (sb->s_blocksize_bits - 9);
 	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL,
-				    DISCARD_FL_BARRIER);
+				   BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
 }
 
 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
@@ -1196,6 +1210,39 @@
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
 
+#ifdef CONFIG_BLK_CGROUP
+static inline void set_start_time_ns(struct request *req)
+{
+	req->start_time_ns = sched_clock();
+}
+
+static inline void set_io_start_time_ns(struct request *req)
+{
+	req->io_start_time_ns = sched_clock();
+}
+
+static inline uint64_t rq_start_time_ns(struct request *req)
+{
+	return req->start_time_ns;
+}
+
+static inline uint64_t rq_io_start_time_ns(struct request *req)
+{
+	return req->io_start_time_ns;
+}
+#else
+static inline void set_start_time_ns(struct request *req) {}
+static inline void set_io_start_time_ns(struct request *req) {}
+static inline uint64_t rq_start_time_ns(struct request *req)
+{
+	return 0;
+}
+static inline uint64_t rq_io_start_time_ns(struct request *req)
+{
+	return 0;
+}
+#endif
+
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
@@ -1283,8 +1330,7 @@
 	int (*direct_access) (struct block_device *, sector_t,
 						void **, unsigned long *);
 	int (*media_changed) (struct gendisk *);
-	unsigned long long (*set_capacity) (struct gendisk *,
-						unsigned long long);
+	void (*unlock_native_capacity) (struct gendisk *);
 	int (*revalidate_disk) (struct gendisk *);
 	int (*getgeo)(struct block_device *, struct hd_geometry *);
 	/* this callback is with swap_lock and sometimes page table lock held */
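
With the DISCARD_FL_* flags replaced by BLKDEV_IFL_*, callers now pass a gfp
mask plus the new flag word. A short sketch of the updated calls (bdev, sector
and nr_sects are assumed to be in scope):

	/* flush the device cache and wait for completion */
	ret = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);

	/* discard a range with wait + barrier, as sb_issue_discard() does */
	ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
				   BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);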
diff --git a/include/linux/device.h b/include/linux/device.h
index 241b96b..7bb9f42 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -22,7 +22,6 @@
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/pm.h>
-#include <linux/semaphore.h>
 #include <asm/atomic.h>
 #include <asm/device.h>
 
@@ -203,6 +202,9 @@
 	int (*suspend)(struct device *dev, pm_message_t state);
 	int (*resume)(struct device *dev);
 
+	const struct kobj_ns_type_operations *ns_type;
+	const void *(*namespace)(struct device *dev);
+
 	const struct dev_pm_ops *pm;
 
 	struct class_private *p;
@@ -404,7 +406,7 @@
 	const char		*init_name; /* initial name of the device */
 	struct device_type	*type;
 
-	struct semaphore	sem;	/* semaphore to synchronize calls to
+	struct mutex		mutex;	/* mutex to synchronize calls to
 					 * its driver.
 					 */
 
@@ -514,17 +516,17 @@
 
 static inline void device_lock(struct device *dev)
 {
-	down(&dev->sem);
+	mutex_lock(&dev->mutex);
 }
 
 static inline int device_trylock(struct device *dev)
 {
-	return down_trylock(&dev->sem);
+	return mutex_trylock(&dev->mutex);
 }
 
 static inline void device_unlock(struct device *dev)
 {
-	up(&dev->sem);
+	mutex_unlock(&dev->mutex);
 }
 
 void driver_init(void);
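
Note that device_trylock() now follows mutex_trylock() semantics (non-zero on
success) instead of down_trylock() (zero on success). A sketch of a caller
under the new convention; do_something_locked() is a hypothetical helper:

	if (device_trylock(dev)) {
		do_something_locked(dev);
		device_unlock(dev);
	}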
diff --git a/include/linux/dqblk_xfs.h b/include/linux/dqblk_xfs.h
index 527504c..4389ae7 100644
--- a/include/linux/dqblk_xfs.h
+++ b/include/linux/dqblk_xfs.h
@@ -110,6 +110,15 @@
 #define FS_DQ_WARNS_MASK	(FS_DQ_BWARNS | FS_DQ_IWARNS | FS_DQ_RTBWARNS)
 
 /*
+ * Accounting values.  These can only be set for filesystem with
+ * non-transactional quotas that require quotacheck(8) in userspace.
+ */
+#define FS_DQ_BCOUNT		(1<<12)
+#define FS_DQ_ICOUNT		(1<<13)
+#define FS_DQ_RTBCOUNT		(1<<14)
+#define FS_DQ_ACCT_MASK		(FS_DQ_BCOUNT | FS_DQ_ICOUNT | FS_DQ_RTBCOUNT)
+
+/*
  * Various flags related to quotactl(2).  Only relevant to XFS filesystems.
  */
 #define XFS_QUOTA_UDQ_ACCT	(1<<0)  /* user quota accounting */
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 4341b1a..6853052 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,10 +53,10 @@
 
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.7"
+#define REL_VERSION "8.3.8rc1"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
-#define PRO_VERSION_MAX 92
+#define PRO_VERSION_MAX 94
 
 
 enum drbd_io_error_p {
@@ -139,6 +139,7 @@
 	ERR_DATA_NOT_CURRENT	= 150,
 	ERR_CONNECTED		= 151, /* DRBD 8.3 only */
 	ERR_PERM		= 152,
+	ERR_NEED_APV_93		= 153,
 
 	/* insert new ones above this line */
 	AFTER_LAST_ERR_CODE
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 51f47a5..440b42e 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -133,5 +133,21 @@
 #define DRBD_MAX_BIO_BVECS_MAX 128
 #define DRBD_MAX_BIO_BVECS_DEF 0
 
+#define DRBD_DP_VOLUME_MIN 4
+#define DRBD_DP_VOLUME_MAX 1048576
+#define DRBD_DP_VOLUME_DEF 16384
+
+#define DRBD_DP_INTERVAL_MIN 1
+#define DRBD_DP_INTERVAL_MAX 600
+#define DRBD_DP_INTERVAL_DEF 5
+
+#define DRBD_RS_THROTTLE_TH_MIN 1
+#define DRBD_RS_THROTTLE_TH_MAX 600
+#define DRBD_RS_THROTTLE_TH_DEF 20
+
+#define DRBD_RS_HOLD_OFF_TH_MIN 1
+#define DRBD_RS_HOLD_OFF_TH_MAX 6000
+#define DRBD_RS_HOLD_OFF_TH_DEF 100
+
 #undef RANGE
 #endif
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index f7431a4..ce77a74 100644
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -71,12 +71,17 @@
 NL_PACKET(resize, 7,
 	NL_INT64(		29,	T_MAY_IGNORE,	resize_size)
 	NL_BIT(			68,	T_MAY_IGNORE,	resize_force)
+	NL_BIT(			69,	T_MANDATORY,	no_resync)
 )
 
 NL_PACKET(syncer_conf, 8,
 	NL_INTEGER(	30,	T_MAY_IGNORE,	rate)
 	NL_INTEGER(	31,	T_MAY_IGNORE,	after)
 	NL_INTEGER(	32,	T_MAY_IGNORE,	al_extents)
+	NL_INTEGER(     71,	T_MAY_IGNORE,	dp_volume)
+	NL_INTEGER(     72,	T_MAY_IGNORE,	dp_interval)
+	NL_INTEGER(     73,	T_MAY_IGNORE,	throttle_th)
+	NL_INTEGER(     74,	T_MAY_IGNORE,	hold_off_th)
 	NL_STRING(      52,     T_MAY_IGNORE,   verify_alg,     SHARED_SECRET_MAX)
 	NL_STRING(      51,     T_MAY_IGNORE,   cpu_mask,       32)
 	NL_STRING(	64,	T_MAY_IGNORE,	csums_alg,	SHARED_SECRET_MAX)
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 1cb3372e..2c958f4 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -14,6 +14,9 @@
 
 typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *);
 
+typedef void (elevator_bio_merged_fn) (struct request_queue *,
+						struct request *, struct bio *);
+
 typedef int (elevator_dispatch_fn) (struct request_queue *, int);
 
 typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
@@ -36,6 +39,7 @@
 	elevator_merged_fn *elevator_merged_fn;
 	elevator_merge_req_fn *elevator_merge_req_fn;
 	elevator_allow_merge_fn *elevator_allow_merge_fn;
+	elevator_bio_merged_fn *elevator_bio_merged_fn;
 
 	elevator_dispatch_fn *elevator_dispatch_fn;
 	elevator_add_req_fn *elevator_add_req_fn;
@@ -103,6 +107,8 @@
 extern void elv_merge_requests(struct request_queue *, struct request *,
 			       struct request *);
 extern void elv_merged_request(struct request_queue *, struct request *, int);
+extern void elv_bio_merged(struct request_queue *q, struct request *,
+				struct bio *);
 extern void elv_requeue_request(struct request_queue *, struct request *);
 extern int elv_queue_empty(struct request_queue *);
 extern struct request *elv_former_request(struct request_queue *, struct request *);
diff --git a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h
index 1cdb663..db4d9f5 100644
--- a/include/linux/ext2_fs_sb.h
+++ b/include/linux/ext2_fs_sb.h
@@ -106,6 +106,15 @@
 	spinlock_t s_rsv_window_lock;
 	struct rb_root s_rsv_window_root;
 	struct ext2_reserve_window_node s_rsv_window_head;
+	/*
+	 * s_lock protects against concurrent modifications of s_mount_state,
+	 * s_blocks_last, s_overhead_last and the content of superblock's
+	 * buffer pointed to by sbi->s_es.
+	 *
+	 * Note: It is used in ext2_show_options() to provide a consistent view
+	 * of the mount options.
+	 */
+	spinlock_t s_lock;
 };
 
 static inline spinlock_t *
diff --git a/include/linux/fb.h b/include/linux/fb.h
index c10163b..1296af4 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -403,6 +403,7 @@
 #include <linux/notifier.h>
 #include <linux/list.h>
 #include <linux/backlight.h>
+#include <linux/slab.h>
 #include <asm/io.h>
 
 struct vm_area_struct;
@@ -862,10 +863,22 @@
 	/* we need the PCI or similar aperture base/size not
 	   smem_start/size as smem_start may just be an object
 	   allocated inside the aperture so may not actually overlap */
-	resource_size_t aperture_base;
-	resource_size_t aperture_size;
+	struct apertures_struct {
+		unsigned int count;
+		struct aperture {
+			resource_size_t base;
+			resource_size_t size;
+		} ranges[0];
+	} *apertures;
 };
 
+static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
+	struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct)
+			+ max_num * sizeof(struct aperture), GFP_KERNEL);
+	if (!a)
+		return NULL;
+	a->count = max_num;
+	return a;
+}
+
 #ifdef MODULE
 #define FBINFO_DEFAULT	FBINFO_MODULE
 #else
@@ -958,6 +971,8 @@
 /* drivers/video/fbmem.c */
 extern int register_framebuffer(struct fb_info *fb_info);
 extern int unregister_framebuffer(struct fb_info *fb_info);
+extern void remove_conflicting_framebuffers(struct apertures_struct *a,
+				const char *name, bool primary);
 extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
 extern int fb_show_logo(struct fb_info *fb_info, int rotate);
 extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
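
A sketch of how alloc_apertures() and remove_conflicting_framebuffers() are
meant to be used together, assuming a PCI driver whose framebuffer lives in
BAR 0 and a made-up driver name:

	struct apertures_struct *ap = alloc_apertures(1);

	if (!ap)
		return -ENOMEM;
	ap->ranges[0].base = pci_resource_start(pdev, 0);
	ap->ranges[0].size = pci_resource_len(pdev, 0);
	remove_conflicting_framebuffers(ap, "mydrmfb", false);
	kfree(ap);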
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 8603740..afc00af 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -22,6 +22,12 @@
 #define F_NOTIFY	(F_LINUX_SPECIFIC_BASE+2)
 
 /*
+ * Set and get of pipe page size array
+ */
+#define F_SETPIPE_SZ	(F_LINUX_SPECIFIC_BASE + 7)
+#define F_GETPIPE_SZ	(F_LINUX_SPECIFIC_BASE + 8)
+
+/*
  * Types of directory notifications that may be requested.
  */
 #define DN_ACCESS	0x00000001	/* File accessed */
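
A userspace sketch of the new pipe-size fcntls; the unit and rounding of the
argument are defined by the pipe code (see pipe_max_pages in
linux/pipe_fs_i.h), so the value below is illustrative only:

	int fds[2];

	if (pipe(fds) < 0)
		return -1;
	if (fcntl(fds[1], F_SETPIPE_SZ, 64) < 0)
		perror("F_SETPIPE_SZ");
	printf("pipe size is now %ld\n", (long)fcntl(fds[1], F_GETPIPE_SZ));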
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 043811f..53d1e6c 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -12,6 +12,7 @@
 struct firmware {
 	size_t size;
 	const u8 *data;
+	struct page **pages;
 };
 
 struct device;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4079ef9..1775d36 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -651,6 +651,7 @@
 	int			bd_openers;
 	struct mutex		bd_mutex;	/* open/close mutex */
 	struct list_head	bd_inodes;
+	void *			bd_claiming;
 	void *			bd_holder;
 	int			bd_holders;
 #ifdef CONFIG_SYSFS
diff --git a/include/linux/gsmmux.h b/include/linux/gsmmux.h
new file mode 100644
index 0000000..378de41
--- /dev/null
+++ b/include/linux/gsmmux.h
@@ -0,0 +1,25 @@
+#ifndef _LINUX_GSMMUX_H
+#define _LINUX_GSMMUX_H
+
+struct gsm_config
+{
+	unsigned int adaption;
+	unsigned int encapsulation;
+	unsigned int initiator;
+	unsigned int t1;
+	unsigned int t2;
+	unsigned int t3;
+	unsigned int n2;
+	unsigned int mru;
+	unsigned int mtu;
+	unsigned int k;
+	unsigned int i;
+	unsigned int unused[8];		/* Padding for expansion without
+					   breaking stuff */
+};
+
+#define GSMIOC_GETCONF		_IOR('G', 0, struct gsm_config)
+#define GSMIOC_SETCONF		_IOW('G', 1, struct gsm_config)
+
+
+#endif
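
A userspace sketch of driving the mux: attach the N_GSM0710 line discipline
(added in linux/tty.h below) to an open tty fd, then tune the parameters with
the two ioctls; fd and the chosen values are illustrative:

	struct gsm_config c;
	int ldisc = N_GSM0710;

	ioctl(fd, TIOCSETD, &ldisc);
	ioctl(fd, GSMIOC_GETCONF, &c);
	c.initiator = 1;
	ioctl(fd, GSMIOC_SETCONF, &c);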
diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
deleted file mode 100644
index 6a87154..0000000
--- a/include/linux/hdpu_features.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#include <linux/spinlock.h>
-
-struct cpustate_t {
-	spinlock_t lock;
-	int excl;
-        int open_count;
-	unsigned char cached_val;
-	int inited;
-	unsigned long *set_addr;
-	unsigned long *clr_addr;
-};
-
-
-#define HDPU_CPUSTATE_NAME "hdpu cpustate"
-#define HDPU_NEXUS_NAME "hdpu nexus"
-
-#define CPUSTATE_KERNEL_MAJOR  0x10
-
-#define CPUSTATE_KERNEL_INIT_DRV   0 /* CPU State Driver Initialized */
-#define CPUSTATE_KERNEL_INIT_PCI   1 /* 64360 PCI Busses Init */
-#define CPUSTATE_KERNEL_INIT_REG   2 /* 64360 Bridge Init */
-#define CPUSTATE_KERNEL_CPU1_KICK  3 /* Boot cpu 1 */
-#define CPUSTATE_KERNEL_CPU1_OK    4  /* Cpu 1 has checked in */
-#define CPUSTATE_KERNEL_OK         5 /* Terminal state */
-#define CPUSTATE_KERNEL_RESET   14 /* Board reset via SW*/
-#define CPUSTATE_KERNEL_HALT   15 /* Board halted via SW*/
diff --git a/include/linux/hid.h b/include/linux/hid.h
index b1344ec..895001f 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -308,11 +308,13 @@
 #define HID_QUIRK_NOTOUCH			0x00000002
 #define HID_QUIRK_IGNORE			0x00000004
 #define HID_QUIRK_NOGET				0x00000008
+#define HID_QUIRK_HIDDEV_FORCE			0x00000010
 #define HID_QUIRK_BADPAD			0x00000020
 #define HID_QUIRK_MULTI_INPUT			0x00000040
 #define HID_QUIRK_SKIP_OUTPUT_REPORTS		0x00010000
 #define HID_QUIRK_FULLSPEED_INTERVAL		0x10000000
 #define HID_QUIRK_NO_INIT_REPORTS		0x20000000
+#define HID_QUIRK_NO_IGNORE			0x40000000
 
 /*
  * This is the global environment of the parser. This information is
@@ -589,6 +591,9 @@
  * @report_fixup: called before report descriptor parsing (NULL means nop)
  * @input_mapping: invoked on input registering before mapping a usage
  * @input_mapped: invoked on input registering after mapping a usage
+ * @suspend: invoked on suspend (NULL means nop)
+ * @resume: invoked on resume if device was not reset (NULL means nop)
+ * @reset_resume: invoked on resume if device was reset (NULL means nop)
  *
  * raw_event and event should return 0 on no action performed, 1 when no
  * further processing should be done and negative on error
@@ -629,6 +634,11 @@
 	int (*input_mapped)(struct hid_device *hdev,
 			struct hid_input *hidinput, struct hid_field *field,
 			struct hid_usage *usage, unsigned long **bit, int *max);
+#ifdef CONFIG_PM
+	int (*suspend)(struct hid_device *hdev, pm_message_t message);
+	int (*resume)(struct hid_device *hdev);
+	int (*reset_resume)(struct hid_device *hdev);
+#endif
 /* private: */
 	struct device_driver driver;
 };
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 3239d1c..b6d4480 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -362,7 +362,7 @@
 struct ide_disk_ops {
 	int		(*check)(struct ide_drive_s *, const char *);
 	int		(*get_capacity)(struct ide_drive_s *);
-	u64		(*set_capacity)(struct ide_drive_s *, u64);
+	void		(*unlock_native_capacity)(struct ide_drive_s *);
 	void		(*setup)(struct ide_drive_s *);
 	void		(*flush)(struct ide_drive_s *);
 	int		(*init_media)(struct ide_drive_s *, struct gendisk *);
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5137db3..c233113 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -78,7 +78,7 @@
 	IRQTF_AFFINITY,
 };
 
-/**
+/*
  * These values can be returned by request_any_context_irq() and
  * describe the context the interrupt will be run in.
  *
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 516a2a2..e069650 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -427,9 +427,9 @@
 	enum {
 		T_RUNNING,
 		T_LOCKED,
-		T_RUNDOWN,
 		T_FLUSH,
 		T_COMMIT,
+		T_COMMIT_RECORD,
 		T_FINISHED
 	}			t_state;
 
@@ -991,6 +991,7 @@
 int journal_force_commit_nested(journal_t *journal);
 int log_wait_commit(journal_t *journal, tid_t tid);
 int log_do_checkpoint(journal_t *journal);
+int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
 
 void __log_wait_for_space(journal_t *journal);
 extern void	__journal_drop_transaction(journal_t *, transaction_t *);
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
new file mode 100644
index 0000000..ccb2b3e
--- /dev/null
+++ b/include/linux/kdb.h
@@ -0,0 +1,117 @@
+#ifndef _KDB_H
+#define _KDB_H
+
+/*
+ * Kernel Debugger Architecture Independent Global Headers
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2007 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
+ */
+
+#ifdef	CONFIG_KGDB_KDB
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+
+#define KDB_POLL_FUNC_MAX	5
+extern int kdb_poll_idx;
+
+/*
+ * kdb_initial_cpu is initialized to -1, and is set to the cpu
+ * number whenever the kernel debugger is entered.
+ */
+extern int kdb_initial_cpu;
+extern atomic_t kdb_event;
+
+/*
+ * kdb_diemsg
+ *
+ *	Contains a pointer to the last string supplied to the
+ *	kernel 'die' panic function.
+ */
+extern const char *kdb_diemsg;
+
+#define KDB_FLAG_EARLYKDB	(1 << 0) /* set from boot parameter kdb=early */
+#define KDB_FLAG_CATASTROPHIC	(1 << 1) /* A catastrophic event has occurred */
+#define KDB_FLAG_CMD_INTERRUPT	(1 << 2) /* Previous command was interrupted */
+#define KDB_FLAG_NOIPI		(1 << 3) /* Do not send IPIs */
+#define KDB_FLAG_ONLY_DO_DUMP	(1 << 4) /* Only do a dump, used when
+					  * kdb is off */
+#define KDB_FLAG_NO_CONSOLE	(1 << 5) /* No console is available,
+					  * kdb is disabled */
+#define KDB_FLAG_NO_VT_CONSOLE	(1 << 6) /* No VT console is available, do
+					  * not use keyboard */
+#define KDB_FLAG_NO_I8042	(1 << 7) /* No i8042 chip is available, do
+					  * not use keyboard */
+
+extern int kdb_flags;	/* Global flags, see kdb_state for per cpu state */
+
+extern void kdb_save_flags(void);
+extern void kdb_restore_flags(void);
+
+#define KDB_FLAG(flag)		(kdb_flags & KDB_FLAG_##flag)
+#define KDB_FLAG_SET(flag)	((void)(kdb_flags |= KDB_FLAG_##flag))
+#define KDB_FLAG_CLEAR(flag)	((void)(kdb_flags &= ~KDB_FLAG_##flag))
+
+/*
+ * External entry point for the kernel debugger.  The pt_regs
+ * at the time of entry are supplied along with the reason for
+ * entry to the kernel debugger.
+ */
+
+typedef enum {
+	KDB_REASON_ENTER = 1,	/* KDB_ENTER() trap/fault - regs valid */
+	KDB_REASON_ENTER_SLAVE,	/* KDB_ENTER_SLAVE() trap/fault - regs valid */
+	KDB_REASON_BREAK,	/* Breakpoint inst. - regs valid */
+	KDB_REASON_DEBUG,	/* Debug Fault - regs valid */
+	KDB_REASON_OOPS,	/* Kernel Oops - regs valid */
+	KDB_REASON_SWITCH,	/* CPU switch - regs valid*/
+	KDB_REASON_KEYBOARD,	/* Keyboard entry - regs valid */
+	KDB_REASON_NMI,		/* Non-maskable interrupt; regs valid */
+	KDB_REASON_RECURSE,	/* Recursive entry to kdb;
+				 * regs probably valid */
+	KDB_REASON_SSTEP,	/* Single Step trap. - regs valid */
+} kdb_reason_t;
+
+extern int kdb_trap_printk;
+extern int vkdb_printf(const char *fmt, va_list args)
+	    __attribute__ ((format (printf, 1, 0)));
+extern int kdb_printf(const char *, ...)
+	    __attribute__ ((format (printf, 1, 2)));
+typedef int (*kdb_printf_t)(const char *, ...)
+	     __attribute__ ((format (printf, 1, 2)));
+
+extern void kdb_init(int level);
+
+/* Access to kdb specific polling devices */
+typedef int (*get_char_func)(void);
+extern get_char_func kdb_poll_funcs[];
+extern int kdb_get_kbd_char(void);
+
+static inline
+int kdb_process_cpu(const struct task_struct *p)
+{
+	unsigned int cpu = task_thread_info(p)->cpu;
+	if (cpu > num_possible_cpus())
+		cpu = 0;
+	return cpu;
+}
+
+/* kdb access to register set for stack dumping */
+extern struct pt_regs *kdb_current_regs;
+
+#else /* ! CONFIG_KGDB_KDB */
+#define kdb_printf(...)
+#define kdb_init(x)
+#endif	/* CONFIG_KGDB_KDB */
+enum {
+	KDB_NOT_INITIALIZED,
+	KDB_INIT_EARLY,
+	KDB_INIT_FULL,
+};
+#endif	/* !_KDB_H */
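
A small sketch of the helpers above as used from kdb command context; p is
assumed to be a task_struct pointer obtained by the command:

	kdb_printf("pid %d last ran on cpu %d\n",
		   p->pid, kdb_process_cpu(p));
	if (KDB_FLAG(CATASTROPHIC))
		kdb_printf("catastrophic error, only dumping state\n");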
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 19ec41a..9340f34 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -16,10 +16,12 @@
 #include <linux/serial_8250.h>
 #include <linux/linkage.h>
 #include <linux/init.h>
-
 #include <asm/atomic.h>
+#ifdef CONFIG_HAVE_ARCH_KGDB
 #include <asm/kgdb.h>
+#endif
 
+#ifdef CONFIG_KGDB
 struct pt_regs;
 
 /**
@@ -34,20 +36,6 @@
 extern int kgdb_skipexception(int exception, struct pt_regs *regs);
 
 /**
- *	kgdb_post_primary_code - (optional) Save error vector/code numbers.
- *	@regs: Original pt_regs.
- *	@e_vector: Original error vector.
- *	@err_code: Original error code.
- *
- *	This is usually needed on architectures which support SMP and
- *	KGDB.  This function is called after all the secondary cpus have
- *	been put to a know spin state and the primary CPU has control over
- *	KGDB.
- */
-extern void kgdb_post_primary_code(struct pt_regs *regs, int e_vector,
-				  int err_code);
-
-/**
  *	kgdb_disable_hw_debug - (optional) Disable hardware debugging hook
  *	@regs: Current &struct pt_regs.
  *
@@ -72,6 +60,7 @@
 void kgdb_breakpoint(void);
 
 extern int kgdb_connected;
+extern int kgdb_io_module_registered;
 
 extern atomic_t			kgdb_setting_breakpoint;
 extern atomic_t			kgdb_cpu_doing_single_step;
@@ -202,12 +191,34 @@
  */
 extern void kgdb_roundup_cpus(unsigned long flags);
 
+/**
+ *	kgdb_arch_set_pc - Generic callback to set the program counter
+ *	@regs: Current &struct pt_regs.
+ *	@pc: The new value for the program counter.
+ *
+ *	This function handles updating the program counter and requires an
+ *	architecture specific implementation.
+ */
+extern void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc);
+
+
 /* Optional functions. */
 extern int kgdb_validate_break_address(unsigned long addr);
 extern int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr);
 extern int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle);
 
 /**
+ *	kgdb_arch_late - Perform any architecture specific initialization.
+ *
+ *	This function will handle the late initialization of any
+ *	architecture specific callbacks.  This is an optional function for
+ *	handling things like late initialization of hw breakpoints.  The
+ *	default implementation does nothing.
+ */
+extern void kgdb_arch_late(void);
+
+
+/**
  * struct kgdb_arch - Describe architecture specific values.
  * @gdb_bpt_instr: The instruction to trigger a breakpoint.
  * @flags: Flags for the breakpoint, currently just %KGDB_HW_BREAKPOINT.
@@ -247,6 +258,8 @@
  * the I/O driver.
  * @post_exception: Pointer to a function that will do any cleanup work
  * for the I/O driver.
+ * @is_console: 1 if the end device is a console, 0 if the I/O device is
+ * not a console
  */
 struct kgdb_io {
 	const char		*name;
@@ -256,6 +269,7 @@
 	int			(*init) (void);
 	void			(*pre_exception) (void);
 	void			(*post_exception) (void);
+	int			is_console;
 };
 
 extern struct kgdb_arch		arch_kgdb_ops;
@@ -264,12 +278,14 @@
 
 extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
 extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
+extern struct kgdb_io *dbg_io_ops;
 
 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
 extern int kgdb_mem2hex(char *mem, char *buf, int count);
 extern int kgdb_hex2mem(char *buf, char *mem, int count);
 
 extern int kgdb_isremovedbreak(unsigned long addr);
+extern void kgdb_schedule_breakpoint(void);
 
 extern int
 kgdb_handle_exception(int ex_vector, int signo, int err_code,
@@ -278,5 +294,12 @@
 
 extern int			kgdb_single_step;
 extern atomic_t			kgdb_active;
-
+#define in_dbg_master() \
+	(raw_smp_processor_id() == atomic_read(&kgdb_active))
+extern bool dbg_is_early;
+extern void __init dbg_late_init(void);
+#else /* ! CONFIG_KGDB */
+#define in_dbg_master() (0)
+#define dbg_late_init()
+#endif /* ! CONFIG_KGDB */
 #endif /* _KGDB_H_ */
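
A minimal sketch of a polled I/O driver filling in the extended struct kgdb_io;
the my_uart_* helpers are hypothetical, and read_char/write_char are the
pre-existing members not visible in the hunk above:

	static int mycon_read_char(void)
	{
		return my_uart_poll_rx();
	}

	static void mycon_write_char(u8 c)
	{
		my_uart_poll_tx(c);
	}

	static struct kgdb_io mycon_kgdb_io = {
		.name		= "mycon",
		.read_char	= mycon_read_char,
		.write_char	= mycon_write_char,
		.is_console	= 1,	/* the same device also acts as a console */
	};

	/* kgdb_register_io_module(&mycon_kgdb_io); */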
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 3950d3c..cf343a8 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -108,6 +108,8 @@
 	void (*release)(struct kobject *kobj);
 	const struct sysfs_ops *sysfs_ops;
 	struct attribute **default_attrs;
+	const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
+	const void *(*namespace)(struct kobject *kobj);
 };
 
 struct kobj_uevent_env {
@@ -134,6 +136,42 @@
 
 extern const struct sysfs_ops kobj_sysfs_ops;
 
+/*
+ * Namespace types which are used to tag kobjects and sysfs entries.
+ * Network namespace will likely be the first.
+ */
+enum kobj_ns_type {
+	KOBJ_NS_TYPE_NONE = 0,
+	KOBJ_NS_TYPE_NET,
+	KOBJ_NS_TYPES
+};
+
+struct sock;
+
+/*
+ * Callbacks so sysfs can determine namespaces
+ *   @current_ns: return calling task's namespace
+ *   @netlink_ns: return namespace to which a sock belongs (right?)
+ *   @initial_ns: return the initial namespace (i.e. init_net_ns)
+ */
+struct kobj_ns_type_operations {
+	enum kobj_ns_type type;
+	const void *(*current_ns)(void);
+	const void *(*netlink_ns)(struct sock *sk);
+	const void *(*initial_ns)(void);
+};
+
+int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
+int kobj_ns_type_registered(enum kobj_ns_type type);
+const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
+const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
+
+const void *kobj_ns_current(enum kobj_ns_type type);
+const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
+const void *kobj_ns_initial(enum kobj_ns_type type);
+void kobj_ns_exit(enum kobj_ns_type type, const void *ns);
+
+
 /**
  * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
  *
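
A sketch of what a kobj_ns_type_operations implementation for the network
namespace could look like; net_netlink_ns and net_initial_ns are hypothetical
helpers, only current_ns is spelled out:

	static const void *net_current_ns(void)
	{
		return current->nsproxy->net_ns;
	}

	static const struct kobj_ns_type_operations net_ns_type_operations = {
		.type		= KOBJ_NS_TYPE_NET,
		.current_ns	= net_current_ns,
		.netlink_ns	= net_netlink_ns,
		.initial_ns	= net_initial_ns,
	};

	/* kobj_ns_type_register(&net_ns_type_operations); */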
diff --git a/include/linux/kref.h b/include/linux/kref.h
index baf4b9e..6cc38fc 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -21,7 +21,6 @@
 	atomic_t refcount;
 };
 
-void kref_set(struct kref *kref, int num);
 void kref_init(struct kref *kref);
 void kref_get(struct kref *kref);
 int kref_put(struct kref *kref, void (*release) (struct kref *kref));
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index a03977a9..06aed83 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -44,6 +44,8 @@
 	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
 };
 
+extern struct lock_class_key __lockdep_no_validate__;
+
 #define LOCKSTAT_POINTS		4
 
 /*
@@ -270,6 +272,9 @@
 #define lockdep_set_subclass(lock, sub)	\
 		lockdep_init_map(&(lock)->dep_map, #lock, \
 				 (lock)->dep_map.key, sub)
+
+#define lockdep_set_novalidate_class(lock) \
+	lockdep_set_class(lock, &__lockdep_no_validate__)
 /*
  * Compare locking classes
  */
@@ -354,6 +359,9 @@
 #define lockdep_set_class_and_subclass(lock, key, sub) \
 		do { (void)(key); } while (0)
 #define lockdep_set_subclass(lock, sub)		do { } while (0)
+
+#define lockdep_set_novalidate_class(lock) do { } while (0)
+
 /*
  * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
  * case since the result is not well defined and the caller should rather
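
lockdep_set_novalidate_class() exists for locks (such as the new dev->mutex)
whose dependency chains are too driver-specific to validate. A sketch of the
expected use, right after initializing the lock:

	mutex_init(&dev->mutex);
	/* opt this lock out of lockdep dependency validation */
	lockdep_set_novalidate_class(&dev->mutex);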
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
new file mode 100644
index 0000000..d11fe0f
--- /dev/null
+++ b/include/linux/msm_mdp.h
@@ -0,0 +1,78 @@
+/* include/linux/msm_mdp.h
+ *
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_MDP_H_
+#define _MSM_MDP_H_
+
+#include <linux/types.h>
+
+#define MSMFB_IOCTL_MAGIC 'm'
+#define MSMFB_GRP_DISP          _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int)
+#define MSMFB_BLIT              _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int)
+
+enum {
+	MDP_RGB_565,		/* RGB 565 planar */
+	MDP_XRGB_8888,		/* RGB 888 padded */
+	MDP_Y_CBCR_H2V2,	/* Y and CbCr, pseudo planar w/ Cb is in MSB */
+	MDP_ARGB_8888,		/* ARGB 888 */
+	MDP_RGB_888,		/* RGB 888 planar */
+	MDP_Y_CRCB_H2V2,	/* Y and CrCb, pseudo planar w/ Cr is in MSB */
+	MDP_YCRYCB_H2V1,	/* YCrYCb interleave */
+	MDP_Y_CRCB_H2V1,	/* Y and CrCb, pseudo planar w/ Cr is in MSB */
+	MDP_Y_CBCR_H2V1,	/* Y and CbCr, pseudo planar w/ Cb is in MSB */
+	MDP_RGBA_8888,		/* ARGB 888 */
+	MDP_BGRA_8888,		/* ABGR 888 */
+	MDP_IMGTYPE_LIMIT	/* Non valid image type after this enum */
+};
+
+enum {
+	PMEM_IMG,
+	FB_IMG,
+};
+
+/* flag values */
+#define MDP_ROT_NOP	0
+#define MDP_FLIP_LR	0x1
+#define MDP_FLIP_UD	0x2
+#define MDP_ROT_90	0x4
+#define MDP_ROT_180	(MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_ROT_270	(MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_DITHER	0x8
+#define MDP_BLUR	0x10
+
+#define MDP_TRANSP_NOP	0xffffffff
+#define MDP_ALPHA_NOP	0xff
+
+struct mdp_rect {
+	u32 x, y, w, h;
+};
+
+struct mdp_img {
+	u32 width, height, format, offset;
+	int memory_id;		/* the file descriptor */
+};
+
+struct mdp_blit_req {
+	struct mdp_img src;
+	struct mdp_img dst;
+	struct mdp_rect src_rect;
+	struct mdp_rect dst_rect;
+	u32 alpha, transp_mask, flags;
+};
+
+struct mdp_blit_req_list {
+	u32 count;
+	struct mdp_blit_req req[];
+};
+
+#endif /* _MSM_MDP_H_ */
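
A userspace sketch of submitting a single blit through MSMFB_BLIT; fb_fd and
src_fd are assumed to be open file descriptors and the rectangles are left to
the caller:

	struct mdp_blit_req_list *list;

	list = calloc(1, sizeof(*list) + sizeof(struct mdp_blit_req));
	list->count = 1;
	list->req[0].src.memory_id = src_fd;
	list->req[0].dst.memory_id = fb_fd;
	list->req[0].alpha = MDP_ALPHA_NOP;
	list->req[0].transp_mask = MDP_TRANSP_NOP;
	list->req[0].flags = MDP_ROT_90 | MDP_DITHER;
	ioctl(fb_fd, MSMFB_BLIT, list);
	free(list);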
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 6eaca5e..59d0669 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -188,6 +188,10 @@
 extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
 extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
 			     __u32 group, gfp_t allocation);
+extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
+	__u32 pid, __u32 group, gfp_t allocation,
+	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
+	void *filter_data);
 extern int netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
 extern int netlink_register_notifier(struct notifier_block *nb);
 extern int netlink_unregister_notifier(struct notifier_block *nb);
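
netlink_broadcast_filtered() lets the caller veto delivery per destination
socket. A sketch with a hypothetical predicate; ssk, skb and group are assumed
to be in scope, and the exact return-value convention of the filter is defined
by the implementation in net/netlink/af_netlink.c:

	static int my_bcast_filter(struct sock *dsk, struct sk_buff *skb,
				   void *data)
	{
		return my_sock_may_see(dsk, data);	/* hypothetical */
	}

	netlink_broadcast_filtered(ssk, skb, 0, group, GFP_KERNEL,
				   my_bcast_filter, NULL);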
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 51611da..8d84062 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -24,7 +24,19 @@
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/timer.h>
 
+/**
+ * struct padata_priv -  Embedded to the users data structure.
+ *
+ * @list: List entry, to attach to the padata lists.
+ * @pd: Pointer to the internal control structure.
+ * @cb_cpu: Callback cpu for serialization.
+ * @seq_nr: Sequence number of the parallelized data object.
+ * @info: Used to pass information from the parallel to the serial function.
+ * @parallel: Parallel execution function.
+ * @serial: Serial complete function.
+ */
 struct padata_priv {
 	struct list_head	list;
 	struct parallel_data	*pd;
@@ -35,11 +47,29 @@
 	void                    (*serial)(struct padata_priv *padata);
 };
 
+/**
+ * struct padata_list
+ *
+ * @list: List head.
+ * @lock: List lock.
+ */
 struct padata_list {
 	struct list_head        list;
 	spinlock_t              lock;
 };
 
+/**
+ * struct padata_queue - The percpu padata queues.
+ *
+ * @parallel: List to wait for parallelization.
+ * @reorder: List to wait for reordering after parallel processing.
+ * @serial: List to wait for serialization after reordering.
+ * @pwork: work struct for parallelization.
+ * @swork: work struct for serialization.
+ * @pd: Backpointer to the internal control structure.
+ * @num_obj: Number of objects that are processed by this cpu.
+ * @cpu_index: Index of the cpu.
+ */
 struct padata_queue {
 	struct padata_list	parallel;
 	struct padata_list	reorder;
@@ -51,6 +81,20 @@
 	int			cpu_index;
 };
 
+/**
+ * struct parallel_data - Internal control structure, covers everything
+ * that depends on the cpumask in use.
+ *
+ * @pinst: padata instance.
+ * @queue: percpu padata queues.
+ * @seq_nr: The sequence number that will be attached to the next object.
+ * @reorder_objects: Number of objects waiting in the reorder queues.
+ * @refcnt: Number of objects holding a reference on this parallel_data.
+ * @max_seq_nr:  Maximal used sequence number.
+ * @cpumask: cpumask in use.
+ * @lock: Reorder lock.
+ * @timer: Reorder timer.
+ */
 struct parallel_data {
 	struct padata_instance	*pinst;
 	struct padata_queue	*queue;
@@ -60,8 +104,19 @@
 	unsigned int		max_seq_nr;
 	cpumask_var_t		cpumask;
 	spinlock_t              lock;
+	struct timer_list       timer;
 };
 
+/**
+ * struct padata_instance - The overall control structure.
+ *
+ * @cpu_notifier: cpu hotplug notifier.
+ * @wq: The workqueue in use.
+ * @pd: The internal control structure.
+ * @cpumask: User supplied cpumask.
+ * @lock: padata instance lock.
+ * @flags: padata flags.
+ */
 struct padata_instance {
 	struct notifier_block   cpu_notifier;
 	struct workqueue_struct *wq;
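
The kernel-doc added above describes the embedding pattern; a sketch of a user
of the interface (padata_do_parallel()/padata_do_serial() are the entry points
declared elsewhere in this header, struct my_request and process() are
hypothetical):

	struct my_request {
		struct padata_priv padata;	/* recovered via container_of() */
		/* ... caller data ... */
	};

	static void my_parallel(struct padata_priv *padata)
	{
		struct my_request *req =
			container_of(padata, struct my_request, padata);

		process(req);		/* CPU-heavy work, runs in parallel */
		padata_do_serial(padata);
	}

	static void my_serial(struct padata_priv *padata)
	{
		/* called in the original submission order */
	}

	/* submit with:
	 *	req->padata.parallel = my_parallel;
	 *	req->padata.serial   = my_serial;
	 *	padata_do_parallel(pinst, &req->padata, cb_cpu);
	 */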
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index b43a9e0..16de393 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -3,7 +3,7 @@
 
 #define PIPEFS_MAGIC 0x50495045
 
-#define PIPE_BUFFERS (16)
+#define PIPE_DEF_BUFFERS	16
 
 #define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
 #define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
@@ -44,17 +44,17 @@
  **/
 struct pipe_inode_info {
 	wait_queue_head_t wait;
-	unsigned int nrbufs, curbuf;
-	struct page *tmp_page;
+	unsigned int nrbufs, curbuf, buffers;
 	unsigned int readers;
 	unsigned int writers;
 	unsigned int waiting_writers;
 	unsigned int r_counter;
 	unsigned int w_counter;
+	struct page *tmp_page;
 	struct fasync_struct *fasync_readers;
 	struct fasync_struct *fasync_writers;
 	struct inode *inode;
-	struct pipe_buffer bufs[PIPE_BUFFERS];
+	struct pipe_buffer *bufs;
 };
 
 /*
@@ -139,6 +139,8 @@
 void pipe_unlock(struct pipe_inode_info *);
 void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
 
+extern unsigned int pipe_max_pages;
+
 /* Drop the inode semaphore and wait for a pipe event, atomically */
 void pipe_wait(struct pipe_inode_info *pipe);
 
@@ -154,4 +156,7 @@
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
 void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
 
+/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
+long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
+
 #endif
diff --git a/include/linux/quota.h b/include/linux/quota.h
index b462916..7126a15 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -174,6 +174,8 @@
 #include <linux/rwsem.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
 
 #include <linux/dqblk_xfs.h>
 #include <linux/dqblk_v1.h>
@@ -238,19 +240,43 @@
 	return test_bit(DQF_INFO_DIRTY_B, &info->dqi_flags);
 }
 
-struct dqstats {
-	int lookups;
-	int drops;
-	int reads;
-	int writes;
-	int cache_hits;
-	int allocated_dquots;
-	int free_dquots;
-	int syncs;
+enum {
+	DQST_LOOKUPS,
+	DQST_DROPS,
+	DQST_READS,
+	DQST_WRITES,
+	DQST_CACHE_HITS,
+	DQST_ALLOC_DQUOTS,
+	DQST_FREE_DQUOTS,
+	DQST_SYNCS,
+	_DQST_DQSTAT_LAST
 };
 
+struct dqstats {
+	int stat[_DQST_DQSTAT_LAST];
+};
+
+extern struct dqstats *dqstats_pcpu;
 extern struct dqstats dqstats;
 
+static inline void dqstats_inc(unsigned int type)
+{
+#ifdef CONFIG_SMP
+	per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]++;
+#else
+	dqstats.stat[type]++;
+#endif
+}
+
+static inline void dqstats_dec(unsigned int type)
+{
+#ifdef CONFIG_SMP
+	per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]--;
+#else
+	dqstats.stat[type]--;
+#endif
+}
+
 #define DQ_MOD_B	0	/* dquot modified since read */
 #define DQ_BLKS_B	1	/* uid/gid has been warned about blk limit */
 #define DQ_INODES_B	2	/* uid/gid has been warned about inode limit */
@@ -311,12 +337,10 @@
 	int (*quota_sync)(struct super_block *, int, int);
 	int (*get_info)(struct super_block *, int, struct if_dqinfo *);
 	int (*set_info)(struct super_block *, int, struct if_dqinfo *);
-	int (*get_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *);
-	int (*set_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *);
+	int (*get_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
+	int (*set_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
 	int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
 	int (*set_xstate)(struct super_block *, unsigned int, int);
-	int (*get_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *);
-	int (*set_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *);
 };
 
 struct quota_format_type {
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index e6fa7ac..370abb1 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -14,6 +14,14 @@
 	return &sb->s_dquot;
 }
 
+/* i_mutex must be held */
+static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
+{
+	return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
+		(ia->ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
+		(ia->ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid);
+}
+
 #if defined(CONFIG_QUOTA)
 
 /*
@@ -63,9 +71,12 @@
 int vfs_quota_sync(struct super_block *sb, int type, int wait);
 int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
 int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
-int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di);
-int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di);
+int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
+		struct fs_disk_quota *di);
+int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
+		struct fs_disk_quota *di);
 
+int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
 int dquot_transfer(struct inode *inode, struct iattr *iattr);
 int vfs_dq_quota_on_remount(struct super_block *sb);
 
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 4e768dd..8600508 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -20,4 +20,6 @@
 extern const struct vm_operations_struct generic_file_vm_ops;
 extern int __init init_rootfs(void);
 
+int ramfs_fill_super(struct super_block *sb, void *data, int silent);
+
 #endif
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 78dd1e7..f10db6e 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -182,6 +182,10 @@
 /* Aeroflex Gaisler GRLIB APBUART */
 #define PORT_APBUART    90
 
+/* Altera UARTs */
+#define PORT_ALTERA_JTAGUART	91
+#define PORT_ALTERA_UART	92
+
 #ifdef __KERNEL__
 
 #include <linux/compiler.h>
@@ -246,6 +250,7 @@
 #endif
 };
 
+#define NO_POLL_CHAR		0x00ff0000
 #define UART_CONFIG_TYPE	(1 << 0)
 #define UART_CONFIG_IRQ		(1 << 1)
 
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 18e7c7c..997c3b4 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -82,4 +82,11 @@
 extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
 				      splice_direct_actor *);
 
+/*
+ * for dynamic pipe sizing
+ */
+extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *);
+extern void splice_shrink_spd(struct pipe_inode_info *,
+				struct splice_pipe_desc *);
+
 #endif
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index f0496b3..f2694eb 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -20,6 +20,7 @@
 
 struct kobject;
 struct module;
+enum kobj_ns_type;
 
 /* FIXME
  * The *owner field is no longer used.
@@ -86,17 +87,18 @@
 
 #define attr_name(_attr) (_attr).attr.name
 
+struct file;
 struct vm_area_struct;
 
 struct bin_attribute {
 	struct attribute	attr;
 	size_t			size;
 	void			*private;
-	ssize_t (*read)(struct kobject *, struct bin_attribute *,
+	ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *,
 			char *, loff_t, size_t);
-	ssize_t (*write)(struct kobject *, struct bin_attribute *,
+	ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *,
 			 char *, loff_t, size_t);
-	int (*mmap)(struct kobject *, struct bin_attribute *attr,
+	int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
 		    struct vm_area_struct *vma);
 };
 
@@ -154,6 +156,9 @@
 int sysfs_rename_link(struct kobject *kobj, struct kobject *target,
 			const char *old_name, const char *new_name);
 
+void sysfs_delete_link(struct kobject *dir, struct kobject *targ,
+			const char *name);
+
 int __must_check sysfs_create_group(struct kobject *kobj,
 				    const struct attribute_group *grp);
 int sysfs_update_group(struct kobject *kobj,
@@ -168,10 +173,15 @@
 void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
 void sysfs_notify_dirent(struct sysfs_dirent *sd);
 struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
+				      const void *ns,
 				      const unsigned char *name);
 struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd);
 void sysfs_put(struct sysfs_dirent *sd);
 void sysfs_printk_last_file(void);
+
+/* Called to clear a ns tag when it is no longer valid */
+void sysfs_exit_ns(enum kobj_ns_type type, const void *tag);
+
 int __must_check sysfs_init(void);
 
 #else /* CONFIG_SYSFS */
@@ -264,6 +274,11 @@
 	return 0;
 }
 
+static inline void sysfs_delete_link(struct kobject *k, struct kobject *t,
+				     const char *name)
+{
+}
+
 static inline int sysfs_create_group(struct kobject *kobj,
 				     const struct attribute_group *grp)
 {
@@ -301,6 +316,7 @@
 }
 static inline
 struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
+				      const void *ns,
 				      const unsigned char *name)
 {
 	return NULL;
@@ -313,6 +329,10 @@
 {
 }
 
+static inline void sysfs_exit_ns(int type, const void *tag)
+{
+}
+
 static inline int __must_check sysfs_init(void)
 {
 	return 0;
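
Binary attribute callbacks now receive the opening struct file as well; a
sketch of a read implementation and its registration under the new prototypes
(my_fill_blob and MYBLOB_SIZE are hypothetical):

	static ssize_t blob_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *attr,
				 char *buf, loff_t off, size_t count)
	{
		return my_fill_blob(kobj, buf, off, count);
	}

	static struct bin_attribute blob_attr = {
		.attr	= { .name = "blob", .mode = 0444 },
		.size	= MYBLOB_SIZE,
		.read	= blob_read,
	};

	/* sysfs_create_bin_file(kobj, &blob_attr); */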
diff --git a/include/linux/tty.h b/include/linux/tty.h
index bb44fa9..931078b 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -23,7 +23,7 @@
  */
 #define NR_UNIX98_PTY_DEFAULT	4096      /* Default maximum for Unix98 ptys */
 #define NR_UNIX98_PTY_MAX	(1 << MINORBITS) /* Absolute limit */
-#define NR_LDISCS		21
+#define NR_LDISCS		30
 
 /* line disciplines */
 #define N_TTY		0
@@ -48,6 +48,7 @@
 #define N_PPS		18	/* Pulse per Second */
 #define N_V253		19	/* Codec control over voice modem */
 #define N_CAIF		20      /* CAIF protocol for talking to modems */
+#define N_GSM0710	21	/* GSM 0710 Mux */
 
 /*
  * This character is the same as _POSIX_VDISABLE: it cannot be used as
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 36520de..cc97d6c 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -65,6 +65,15 @@
 	 * so we use a single control to update them
 	 */
 	unsigned no_nrwrite_index_update:1;
+
+	/*
+	 * For WB_SYNC_ALL, the sb must always be pinned. For WB_SYNC_NONE,
+	 * the writeback code will pin the sb for the caller. However,
+	 * e.g. for umount, the caller does WB_SYNC_NONE but already has
+	 * the sb pinned. If the below is set, caller already has the
+	 * sb pinned.
+	 */
+	unsigned sb_pinned:1;
 };
 
 /*
@@ -73,6 +82,7 @@
 struct bdi_writeback;
 int inode_wait(void *);
 void writeback_inodes_sb(struct super_block *);
+void writeback_inodes_sb_locked(struct super_block *);
 int writeback_inodes_sb_if_idle(struct super_block *);
 void sync_inodes_sb(struct super_block *);
 void writeback_inodes_wbc(struct writeback_control *wbc);
@@ -96,8 +106,14 @@
 /*
  * mm/page-writeback.c
  */
-void laptop_io_completion(void);
+#ifdef CONFIG_BLOCK
+void laptop_io_completion(struct backing_dev_info *info);
 void laptop_sync_completion(void);
+void laptop_mode_sync(struct work_struct *work);
+void laptop_mode_timer_fn(unsigned long data);
+#else
+static inline void laptop_sync_completion(void) { }
+#endif
 void throttle_vm_writeout(gfp_t gfp_mask);
 
 /* These are exported to sysctl. */
diff --git a/init/Kconfig b/init/Kconfig
index 5fe94b8..2cce9f3 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -611,6 +611,33 @@
 
 endif #CGROUP_SCHED
 
+config BLK_CGROUP
+	tristate "Block IO controller"
+	depends on CGROUPS && BLOCK
+	default n
+	---help---
+	Generic block IO controller cgroup interface. This is the common
+	cgroup interface which should be used by various IO controlling
+	policies.
+
+	Currently, CFQ IO scheduler uses it to recognize task groups and
+	control disk bandwidth allocation (proportional time slice allocation)
+	to such task groups.
+
+	This option only enables generic Block IO controller infrastructure.
+	One needs to also enable actual IO controlling logic in CFQ for it
+	to take effect. (CONFIG_CFQ_GROUP_IOSCHED=y).
+
+	See Documentation/cgroups/blkio-controller.txt for more information.
+
+config DEBUG_BLK_CGROUP
+	bool "Enable Block IO controller debugging"
+	depends on BLK_CGROUP
+	default n
+	---help---
+	Enable some debugging help. Currently it exports additional stat
+	files in a cgroup which can be useful for debugging.
+
 endif # CGROUPS
 
 config MM_OWNER
diff --git a/init/main.c b/init/main.c
index 5c85402..22881b5 100644
--- a/init/main.c
+++ b/init/main.c
@@ -62,6 +62,7 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/idr.h>
+#include <linux/kgdb.h>
 #include <linux/ftrace.h>
 #include <linux/async.h>
 #include <linux/kmemcheck.h>
@@ -675,6 +676,7 @@
 	buffer_init();
 	key_init();
 	security_init();
+	dbg_late_init();
 	vfs_caches_init(totalram_pages);
 	signals_init();
 	/* rootfs populating might need page-writeback */
diff --git a/kernel/Makefile b/kernel/Makefile
index 149e18e..057472f 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -75,7 +75,7 @@
 obj-$(CONFIG_GCOV_KERNEL) += gcov/
 obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
 obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_KGDB) += debug/
 obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
 obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
 obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
diff --git a/kernel/debug/Makefile b/kernel/debug/Makefile
new file mode 100644
index 0000000..a85edc3
--- /dev/null
+++ b/kernel/debug/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the linux kernel debugger
+#
+
+obj-$(CONFIG_KGDB) += debug_core.o gdbstub.o
+obj-$(CONFIG_KGDB_KDB) += kdb/
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
new file mode 100644
index 0000000..5cb7cd1
--- /dev/null
+++ b/kernel/debug/debug_core.c
@@ -0,0 +1,983 @@
+/*
+ * Kernel Debug Core
+ *
+ * Maintainer: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (C) 2000-2001 VERITAS Software Corporation.
+ * Copyright (C) 2002-2004 Timesys Corporation
+ * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
+ * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
+ * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
+ * Copyright (C) 2005-2009 Wind River Systems, Inc.
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Contributors at various stages not listed above:
+ *  Jason Wessel ( jason.wessel@windriver.com )
+ *  George Anzinger <george@mvista.com>
+ *  Anurekh Saxena (anurekh.saxena@timesys.com)
+ *  Lake Stevens Instrument Division (Glenn Engel)
+ *  Jim Kingdon, Cygnus Support.
+ *
+ * Original KGDB stub: David Grothe <dave@gcom.com>,
+ * Tigran Aivazian <tigran@sco.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#include <linux/pid_namespace.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/console.h>
+#include <linux/threads.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/sysrq.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/pid.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+#include "debug_core.h"
+
+static int kgdb_break_asap;
+
+struct debuggerinfo_struct kgdb_info[NR_CPUS];
+
+/**
+ * kgdb_connected - Is a host GDB connected to us?
+ */
+int				kgdb_connected;
+EXPORT_SYMBOL_GPL(kgdb_connected);
+
+/* All the KGDB handlers are installed */
+int			kgdb_io_module_registered;
+
+/* Guard for recursive entry */
+static int			exception_level;
+
+struct kgdb_io		*dbg_io_ops;
+static DEFINE_SPINLOCK(kgdb_registration_lock);
+
+/* kgdb console driver is loaded */
+static int kgdb_con_registered;
+/* determine if kgdb console output should be used */
+static int kgdb_use_con;
+/* Flag for alternate operations for early debugging */
+bool dbg_is_early = true;
+/* Next cpu to become the master debug core */
+int dbg_switch_cpu;
+
+/* Use kdb or gdbserver mode */
+int dbg_kdb_mode = 1;
+
+static int __init opt_kgdb_con(char *str)
+{
+	kgdb_use_con = 1;
+	return 0;
+}
+
+early_param("kgdbcon", opt_kgdb_con);
+
+module_param(kgdb_use_con, int, 0644);
+
+/*
+ * Holds information about breakpoints in a kernel. These breakpoints are
+ * added and removed by gdb.
+ */
+static struct kgdb_bkpt		kgdb_break[KGDB_MAX_BREAKPOINTS] = {
+	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
+};
+
+/*
+ * The CPU# of the active CPU, or -1 if none:
+ */
+atomic_t			kgdb_active = ATOMIC_INIT(-1);
+EXPORT_SYMBOL_GPL(kgdb_active);
+
+/*
+ * We use NR_CPUS not PERCPU, in case kgdb is used to debug early
+ * bootup code (which might not have percpu set up yet):
+ */
+static atomic_t			passive_cpu_wait[NR_CPUS];
+static atomic_t			cpu_in_kgdb[NR_CPUS];
+static atomic_t			kgdb_break_tasklet_var;
+atomic_t			kgdb_setting_breakpoint;
+
+struct task_struct		*kgdb_usethread;
+struct task_struct		*kgdb_contthread;
+
+int				kgdb_single_step;
+static pid_t			kgdb_sstep_pid;
+
+/* To keep track of the CPU which is doing the single stepping */
+atomic_t			kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
+
+/*
+ * If you are debugging a problem where roundup (the collection of
+ * all other CPUs) is a problem [this should be extremely rare],
+ * then use the nokgdbroundup option to avoid roundup. In that case
+ * the other CPUs might interfere with your debugging context, so
+ * use this with care:
+ */
+static int kgdb_do_roundup = 1;
+
+static int __init opt_nokgdbroundup(char *str)
+{
+	kgdb_do_roundup = 0;
+
+	return 0;
+}
+
+early_param("nokgdbroundup", opt_nokgdbroundup);
+
+/*
+ * Finally, some KGDB code :-)
+ */
+
+/*
+ * Weak aliases for breakpoint management,
+ * can be overridden by architectures when needed:
+ */
+int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
+{
+	int err;
+
+	err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
+	if (err)
+		return err;
+
+	return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
+				  BREAK_INSTR_SIZE);
+}
+
+int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
+{
+	return probe_kernel_write((char *)addr,
+				  (char *)bundle, BREAK_INSTR_SIZE);
+}
+
+int __weak kgdb_validate_break_address(unsigned long addr)
+{
+	char tmp_variable[BREAK_INSTR_SIZE];
+	int err;
+	/* Validate setting the breakpoint and then removing it.  If the
+	 * remove fails, the kernel needs to emit a bad message because we
+	 * are in deep trouble, not being able to put things back the way we
+	 * found them.
+	 */
+	err = kgdb_arch_set_breakpoint(addr, tmp_variable);
+	if (err)
+		return err;
+	err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
+	if (err)
+		printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
+		   "memory destroyed at: %lx", addr);
+	return err;
+}
+
+unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+	return instruction_pointer(regs);
+}
+
+int __weak kgdb_arch_init(void)
+{
+	return 0;
+}
+
+int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
+{
+	return 0;
+}
+
+/**
+ *	kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb.
+ *	@regs: Current &struct pt_regs.
+ *
+ *	This function will be called if the particular architecture must
+ *	disable hardware debugging while it is processing gdb packets or
+ *	handling an exception.
+ */
+void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
+{
+}
+
+/*
+ * Some architectures need cache flushes when we set/clear a
+ * breakpoint:
+ */
+static void kgdb_flush_swbreak_addr(unsigned long addr)
+{
+	if (!CACHE_FLUSH_IS_SAFE)
+		return;
+
+	if (current->mm && current->mm->mmap_cache) {
+		flush_cache_range(current->mm->mmap_cache,
+				  addr, addr + BREAK_INSTR_SIZE);
+	}
+	/* Force flush instruction cache if it was outside the mm */
+	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
+}
+
+/*
+ * SW breakpoint management:
+ */
+int dbg_activate_sw_breakpoints(void)
+{
+	unsigned long addr;
+	int error;
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if (kgdb_break[i].state != BP_SET)
+			continue;
+
+		addr = kgdb_break[i].bpt_addr;
+		error = kgdb_arch_set_breakpoint(addr,
+				kgdb_break[i].saved_instr);
+		if (error) {
+			ret = error;
+			printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
+			continue;
+		}
+
+		kgdb_flush_swbreak_addr(addr);
+		kgdb_break[i].state = BP_ACTIVE;
+	}
+	return ret;
+}
+
+int dbg_set_sw_break(unsigned long addr)
+{
+	int err = kgdb_validate_break_address(addr);
+	int breakno = -1;
+	int i;
+
+	if (err)
+		return err;
+
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if ((kgdb_break[i].state == BP_SET) &&
+					(kgdb_break[i].bpt_addr == addr))
+			return -EEXIST;
+	}
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if (kgdb_break[i].state == BP_REMOVED &&
+					kgdb_break[i].bpt_addr == addr) {
+			breakno = i;
+			break;
+		}
+	}
+
+	if (breakno == -1) {
+		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+			if (kgdb_break[i].state == BP_UNDEFINED) {
+				breakno = i;
+				break;
+			}
+		}
+	}
+
+	if (breakno == -1)
+		return -E2BIG;
+
+	kgdb_break[breakno].state = BP_SET;
+	kgdb_break[breakno].type = BP_BREAKPOINT;
+	kgdb_break[breakno].bpt_addr = addr;
+
+	return 0;
+}
+
+int dbg_deactivate_sw_breakpoints(void)
+{
+	unsigned long addr;
+	int error;
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if (kgdb_break[i].state != BP_ACTIVE)
+			continue;
+		addr = kgdb_break[i].bpt_addr;
+		error = kgdb_arch_remove_breakpoint(addr,
+					kgdb_break[i].saved_instr);
+		if (error) {
+			printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
+			ret = error;
+		}
+
+		kgdb_flush_swbreak_addr(addr);
+		kgdb_break[i].state = BP_SET;
+	}
+	return ret;
+}
+
+int dbg_remove_sw_break(unsigned long addr)
+{
+	int i;
+
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if ((kgdb_break[i].state == BP_SET) &&
+				(kgdb_break[i].bpt_addr == addr)) {
+			kgdb_break[i].state = BP_REMOVED;
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+int kgdb_isremovedbreak(unsigned long addr)
+{
+	int i;
+
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if ((kgdb_break[i].state == BP_REMOVED) &&
+					(kgdb_break[i].bpt_addr == addr))
+			return 1;
+	}
+	return 0;
+}
+
+int dbg_remove_all_break(void)
+{
+	unsigned long addr;
+	int error;
+	int i;
+
+	/* Clear memory breakpoints. */
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if (kgdb_break[i].state != BP_ACTIVE)
+			goto setundefined;
+		addr = kgdb_break[i].bpt_addr;
+		error = kgdb_arch_remove_breakpoint(addr,
+				kgdb_break[i].saved_instr);
+		if (error)
+			printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
+			   addr);
+setundefined:
+		kgdb_break[i].state = BP_UNDEFINED;
+	}
+
+	/* Clear hardware breakpoints. */
+	if (arch_kgdb_ops.remove_all_hw_break)
+		arch_kgdb_ops.remove_all_hw_break();
+
+	return 0;
+}
+
+/*
+ * Return true if there is a valid kgdb I/O module.  Also if no
+ * debugger is attached a message can be printed to the console about
+ * waiting for the debugger to attach.
+ *
+ * The print_wait argument should only be true when called from inside
+ * the core kgdb_handle_exception, because it will wait for the
+ * debugger to attach.
+ */
+static int kgdb_io_ready(int print_wait)
+{
+	if (!dbg_io_ops)
+		return 0;
+	if (kgdb_connected)
+		return 1;
+	if (atomic_read(&kgdb_setting_breakpoint))
+		return 1;
+	if (print_wait) {
+#ifdef CONFIG_KGDB_KDB
+		if (!dbg_kdb_mode)
+			printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
+#else
+		printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
+#endif
+	}
+	return 1;
+}
+
+static int kgdb_reenter_check(struct kgdb_state *ks)
+{
+	unsigned long addr;
+
+	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
+		return 0;
+
+	/* Panic on recursive debugger calls: */
+	exception_level++;
+	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
+	dbg_deactivate_sw_breakpoints();
+
+	/*
+	 * If the breakpoint was removed successfully at the place the
+	 * exception occurred, try to recover and print a warning to the
+	 * end user, because the user planted a breakpoint in a place that
+	 * KGDB needs in order to function.
+	 */
+	if (dbg_remove_sw_break(addr) == 0) {
+		exception_level = 0;
+		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
+		dbg_activate_sw_breakpoints();
+		printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
+			addr);
+		WARN_ON_ONCE(1);
+
+		return 1;
+	}
+	dbg_remove_all_break();
+	kgdb_skipexception(ks->ex_vector, ks->linux_regs);
+
+	if (exception_level > 1) {
+		dump_stack();
+		panic("Recursive entry to debugger");
+	}
+
+	printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
+#ifdef CONFIG_KGDB_KDB
+	/* Allow kdb to debug itself one level */
+	return 0;
+#endif
+	dump_stack();
+	panic("Recursive entry to debugger");
+
+	return 1;
+}
+
+static void dbg_cpu_switch(int cpu, int next_cpu)
+{
+	/* Mark the cpu we are switching away from as a slave when it
+	 * holds the kgdb_active token.  This must be done so that the
+	 * loop that all the cpus wait in for the debug core does not
+	 * let this cpu enter again as the master. */
+	if (cpu == atomic_read(&kgdb_active)) {
+		kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
+		kgdb_info[cpu].exception_state &= ~DCPU_WANT_MASTER;
+	}
+	kgdb_info[next_cpu].exception_state |= DCPU_NEXT_MASTER;
+}
+
+static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
+{
+	unsigned long flags;
+	int sstep_tries = 100;
+	int error;
+	int i, cpu;
+	int trace_on = 0;
+acquirelock:
+	/*
+	 * Interrupts will be restored by the 'trap return' code, except when
+	 * single stepping.
+	 */
+	local_irq_save(flags);
+
+	cpu = ks->cpu;
+	kgdb_info[cpu].debuggerinfo = regs;
+	kgdb_info[cpu].task = current;
+	kgdb_info[cpu].ret_state = 0;
+	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
+	/*
+	 * Make sure the above info reaches the primary CPU before
+	 * our cpu_in_kgdb[] flag setting does:
+	 */
+	atomic_inc(&cpu_in_kgdb[cpu]);
+
+	if (exception_level == 1)
+		goto cpu_master_loop;
+
+	/*
+	 * The CPU will loop while it is a slave, or until it becomes
+	 * the kgdb master cpu and acquires the kgdb_active lock:
+	 */
+	while (1) {
+cpu_loop:
+		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
+			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
+			goto cpu_master_loop;
+		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
+			if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
+				break;
+		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
+			if (!atomic_read(&passive_cpu_wait[cpu]))
+				goto return_normal;
+		} else {
+return_normal:
+			/* Return to normal operation by executing any
+			 * hw breakpoint fixup.
+			 */
+			if (arch_kgdb_ops.correct_hw_break)
+				arch_kgdb_ops.correct_hw_break();
+			if (trace_on)
+				tracing_on();
+			atomic_dec(&cpu_in_kgdb[cpu]);
+			touch_softlockup_watchdog_sync();
+			clocksource_touch_watchdog();
+			local_irq_restore(flags);
+			return 0;
+		}
+		cpu_relax();
+	}
+
+	/*
+	 * For single stepping, try to only enter on the processor
+	 * that was single stepping.  To guard against a deadlock, the
+	 * kernel will only try for the value of sstep_tries before
+	 * giving up and continuing on.
+	 */
+	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
+	    (kgdb_info[cpu].task &&
+	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
+		atomic_set(&kgdb_active, -1);
+		touch_softlockup_watchdog_sync();
+		clocksource_touch_watchdog();
+		local_irq_restore(flags);
+
+		goto acquirelock;
+	}
+
+	if (!kgdb_io_ready(1)) {
+		kgdb_info[cpu].ret_state = 1;
+		goto kgdb_restore; /* No I/O connection, resume the system */
+	}
+
+	/*
+	 * Don't enter if we have hit a removed breakpoint.
+	 */
+	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
+		goto kgdb_restore;
+
+	/* Call the I/O driver's pre_exception routine */
+	if (dbg_io_ops->pre_exception)
+		dbg_io_ops->pre_exception();
+
+	kgdb_disable_hw_debug(ks->linux_regs);
+
+	/*
+	 * Get the passive CPU lock which will hold all the non-primary
+	 * CPUs in a spin state while the debugger is active.
+	 */
+	if (!kgdb_single_step) {
+		for (i = 0; i < NR_CPUS; i++)
+			atomic_inc(&passive_cpu_wait[i]);
+	}
+
+#ifdef CONFIG_SMP
+	/* Signal the other CPUs to enter kgdb_wait() */
+	if ((!kgdb_single_step) && kgdb_do_roundup)
+		kgdb_roundup_cpus(flags);
+#endif
+
+	/*
+	 * Wait for the other CPUs to be notified and be waiting for us:
+	 */
+	for_each_online_cpu(i) {
+		while (kgdb_do_roundup && !atomic_read(&cpu_in_kgdb[i]))
+			cpu_relax();
+	}
+
+	/*
+	 * At this point the primary processor is completely
+	 * in the debugger and all secondary CPUs are quiescent
+	 */
+	dbg_deactivate_sw_breakpoints();
+	kgdb_single_step = 0;
+	kgdb_contthread = current;
+	exception_level = 0;
+	trace_on = tracing_is_on();
+	if (trace_on)
+		tracing_off();
+
+	while (1) {
+cpu_master_loop:
+		if (dbg_kdb_mode) {
+			kgdb_connected = 1;
+			error = kdb_stub(ks);
+		} else {
+			error = gdb_serial_stub(ks);
+		}
+
+		if (error == DBG_PASS_EVENT) {
+			dbg_kdb_mode = !dbg_kdb_mode;
+			kgdb_connected = 0;
+		} else if (error == DBG_SWITCH_CPU_EVENT) {
+			dbg_cpu_switch(cpu, dbg_switch_cpu);
+			goto cpu_loop;
+		} else {
+			kgdb_info[cpu].ret_state = error;
+			break;
+		}
+	}
+
+	/* Call the I/O driver's post_exception routine */
+	if (dbg_io_ops->post_exception)
+		dbg_io_ops->post_exception();
+
+	atomic_dec(&cpu_in_kgdb[ks->cpu]);
+
+	if (!kgdb_single_step) {
+		for (i = NR_CPUS-1; i >= 0; i--)
+			atomic_dec(&passive_cpu_wait[i]);
+		/*
+		 * Wait till all the CPUs have quit from the debugger,
+		 * but allow a CPU that hit an exception and is
+		 * waiting to become the master to remain in the debug
+		 * core.
+		 */
+		for_each_online_cpu(i) {
+			while (kgdb_do_roundup &&
+			       atomic_read(&cpu_in_kgdb[i]) &&
+			       !(kgdb_info[i].exception_state &
+				 DCPU_WANT_MASTER))
+				cpu_relax();
+		}
+	}
+
+kgdb_restore:
+	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
+		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
+		if (kgdb_info[sstep_cpu].task)
+			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
+		else
+			kgdb_sstep_pid = 0;
+	}
+	if (trace_on)
+		tracing_on();
+	/* Free kgdb_active */
+	atomic_set(&kgdb_active, -1);
+	touch_softlockup_watchdog_sync();
+	clocksource_touch_watchdog();
+	local_irq_restore(flags);
+
+	return kgdb_info[cpu].ret_state;
+}
+
+/*
+ * kgdb_handle_exception() - main entry point from a kernel exception
+ *
+ * Locking hierarchy:
+ *	interface locks, if any (begin_session)
+ *	kgdb lock (kgdb_active)
+ */
+int
+kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+{
+	struct kgdb_state kgdb_var;
+	struct kgdb_state *ks = &kgdb_var;
+	int ret;
+
+	ks->cpu			= raw_smp_processor_id();
+	ks->ex_vector		= evector;
+	ks->signo		= signo;
+	ks->err_code		= ecode;
+	ks->kgdb_usethreadid	= 0;
+	ks->linux_regs		= regs;
+
+	if (kgdb_reenter_check(ks))
+		return 0; /* Ouch, double exception ! */
+	kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
+	ret = kgdb_cpu_enter(ks, regs);
+	kgdb_info[ks->cpu].exception_state &= ~(DCPU_WANT_MASTER |
+						DCPU_IS_SLAVE);
+	return ret;
+}
+
+int kgdb_nmicallback(int cpu, void *regs)
+{
+#ifdef CONFIG_SMP
+	struct kgdb_state kgdb_var;
+	struct kgdb_state *ks = &kgdb_var;
+
+	memset(ks, 0, sizeof(struct kgdb_state));
+	ks->cpu			= cpu;
+	ks->linux_regs		= regs;
+
+	if (!atomic_read(&cpu_in_kgdb[cpu]) &&
+	    atomic_read(&kgdb_active) != -1 &&
+	    atomic_read(&kgdb_active) != cpu) {
+		kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
+		kgdb_cpu_enter(ks, regs);
+		kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
+		return 0;
+	}
+#endif
+	return 1;
+}
+
+static void kgdb_console_write(struct console *co, const char *s,
+   unsigned count)
+{
+	unsigned long flags;
+
+	/* If we're debugging, or KGDB has not connected, don't try
+	 * to print. */
+	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
+		return;
+
+	local_irq_save(flags);
+	gdbstub_msg_write(s, count);
+	local_irq_restore(flags);
+}
+
+static struct console kgdbcons = {
+	.name		= "kgdb",
+	.write		= kgdb_console_write,
+	.flags		= CON_PRINTBUFFER | CON_ENABLED,
+	.index		= -1,
+};
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void sysrq_handle_dbg(int key, struct tty_struct *tty)
+{
+	if (!dbg_io_ops) {
+		printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
+		return;
+	}
+	if (!kgdb_connected) {
+#ifdef CONFIG_KGDB_KDB
+		if (!dbg_kdb_mode)
+			printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
+#else
+		printk(KERN_CRIT "Entering KGDB\n");
+#endif
+	}
+
+	kgdb_breakpoint();
+}
+
+static struct sysrq_key_op sysrq_dbg_op = {
+	.handler	= sysrq_handle_dbg,
+	.help_msg	= "debug(G)",
+	.action_msg	= "DEBUG",
+};
+#endif
+
+static int kgdb_panic_event(struct notifier_block *self,
+			    unsigned long val,
+			    void *data)
+{
+	if (dbg_kdb_mode)
+		kdb_printf("PANIC: %s\n", (char *)data);
+	kgdb_breakpoint();
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block kgdb_panic_event_nb = {
+       .notifier_call	= kgdb_panic_event,
+       .priority	= INT_MAX,
+};
+
+void __weak kgdb_arch_late(void)
+{
+}
+
+void __init dbg_late_init(void)
+{
+	dbg_is_early = false;
+	if (kgdb_io_module_registered)
+		kgdb_arch_late();
+	kdb_init(KDB_INIT_FULL);
+}
+
+static void kgdb_register_callbacks(void)
+{
+	if (!kgdb_io_module_registered) {
+		kgdb_io_module_registered = 1;
+		kgdb_arch_init();
+		if (!dbg_is_early)
+			kgdb_arch_late();
+		atomic_notifier_chain_register(&panic_notifier_list,
+					       &kgdb_panic_event_nb);
+#ifdef CONFIG_MAGIC_SYSRQ
+		register_sysrq_key('g', &sysrq_dbg_op);
+#endif
+		if (kgdb_use_con && !kgdb_con_registered) {
+			register_console(&kgdbcons);
+			kgdb_con_registered = 1;
+		}
+	}
+}
+
+static void kgdb_unregister_callbacks(void)
+{
+	/*
+	 * When this routine is called KGDB should unregister from the
+	 * panic handler and clean up, making sure it is not handling any
+	 * break exceptions at the time.
+	 */
+	if (kgdb_io_module_registered) {
+		kgdb_io_module_registered = 0;
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+					       &kgdb_panic_event_nb);
+		kgdb_arch_exit();
+#ifdef CONFIG_MAGIC_SYSRQ
+		unregister_sysrq_key('g', &sysrq_dbg_op);
+#endif
+		if (kgdb_con_registered) {
+			unregister_console(&kgdbcons);
+			kgdb_con_registered = 0;
+		}
+	}
+}
+
+/*
+ * There are times a tasklet needs to be used instead of a compiled-in
+ * breakpoint so as to cause an exception outside a kgdb I/O module,
+ * such as is the case with kgdboe, where calling a breakpoint in the
+ * I/O driver itself would be fatal.
+ */
+static void kgdb_tasklet_bpt(unsigned long ing)
+{
+	kgdb_breakpoint();
+	atomic_set(&kgdb_break_tasklet_var, 0);
+}
+
+static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
+
+void kgdb_schedule_breakpoint(void)
+{
+	if (atomic_read(&kgdb_break_tasklet_var) ||
+		atomic_read(&kgdb_active) != -1 ||
+		atomic_read(&kgdb_setting_breakpoint))
+		return;
+	atomic_inc(&kgdb_break_tasklet_var);
+	tasklet_schedule(&kgdb_tasklet_breakpoint);
+}
+EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
+
+static void kgdb_initial_breakpoint(void)
+{
+	kgdb_break_asap = 0;
+
+	printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
+	kgdb_breakpoint();
+}
+
+/**
+ *	kgdb_register_io_module - register KGDB IO module
+ *	@new_dbg_io_ops: the io ops vector
+ *
+ *	Register it with the KGDB core.
+ */
+int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
+{
+	int err;
+
+	spin_lock(&kgdb_registration_lock);
+
+	if (dbg_io_ops) {
+		spin_unlock(&kgdb_registration_lock);
+
+		printk(KERN_ERR "kgdb: Another I/O driver is already "
+				"registered with KGDB.\n");
+		return -EBUSY;
+	}
+
+	if (new_dbg_io_ops->init) {
+		err = new_dbg_io_ops->init();
+		if (err) {
+			spin_unlock(&kgdb_registration_lock);
+			return err;
+		}
+	}
+
+	dbg_io_ops = new_dbg_io_ops;
+
+	spin_unlock(&kgdb_registration_lock);
+
+	printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
+	       new_dbg_io_ops->name);
+
+	/* Arm KGDB now. */
+	kgdb_register_callbacks();
+
+	if (kgdb_break_asap)
+		kgdb_initial_breakpoint();
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kgdb_register_io_module);
+
+/**
+ *	kgdb_unregister_io_module - unregister KGDB IO module
+ *	@old_dbg_io_ops: the io ops vector
+ *
+ *	Unregister it with the KGDB core.
+ */
+void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
+{
+	BUG_ON(kgdb_connected);
+
+	/*
+	 * KGDB is no longer able to communicate out, so
+	 * unregister our callbacks and reset state.
+	 */
+	kgdb_unregister_callbacks();
+
+	spin_lock(&kgdb_registration_lock);
+
+	WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
+	dbg_io_ops = NULL;
+
+	spin_unlock(&kgdb_registration_lock);
+
+	printk(KERN_INFO
+		"kgdb: Unregistered I/O driver %s, debugger disabled.\n",
+		old_dbg_io_ops->name);
+}
+EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
+
+int dbg_io_get_char(void)
+{
+	int ret = dbg_io_ops->read_char();
+	if (ret == NO_POLL_CHAR)
+		return -1;
+	if (!dbg_kdb_mode)
+		return ret;
+	if (ret == 127)
+		return 8;
+	return ret;
+}
+
+/**
+ * kgdb_breakpoint - generate breakpoint exception
+ *
+ * This function will generate a breakpoint exception.  It is used at the
+ * beginning of a program to sync up with a debugger and can be used
+ * otherwise as a quick means to stop program execution and "break" into
+ * the debugger.
+ */
+void kgdb_breakpoint(void)
+{
+	atomic_inc(&kgdb_setting_breakpoint);
+	wmb(); /* Sync point before breakpoint */
+	arch_kgdb_breakpoint();
+	wmb(); /* Sync point after breakpoint */
+	atomic_dec(&kgdb_setting_breakpoint);
+}
+EXPORT_SYMBOL_GPL(kgdb_breakpoint);
+
+static int __init opt_kgdb_wait(char *str)
+{
+	kgdb_break_asap = 1;
+
+	kdb_init(KDB_INIT_EARLY);
+	if (kgdb_io_module_registered)
+		kgdb_initial_breakpoint();
+
+	return 0;
+}
+
+early_param("kgdbwait", opt_kgdb_wait);
diff --git a/kernel/debug/debug_core.h b/kernel/debug/debug_core.h
new file mode 100644
index 0000000..c5d753d
--- /dev/null
+++ b/kernel/debug/debug_core.h
@@ -0,0 +1,81 @@
+/*
+ * Created by: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _DEBUG_CORE_H_
+#define _DEBUG_CORE_H_
+/*
+ * These are the private implementation headers between the kernel
+ * debugger core and the debugger front end code.
+ */
+
+/* kernel debug core data structures */
+struct kgdb_state {
+	int			ex_vector;
+	int			signo;
+	int			err_code;
+	int			cpu;
+	int			pass_exception;
+	unsigned long		thr_query;
+	unsigned long		threadid;
+	long			kgdb_usethreadid;
+	struct pt_regs		*linux_regs;
+};
+
+/* Exception state values */
+#define DCPU_WANT_MASTER 0x1 /* Waiting to become a master kgdb cpu */
+#define DCPU_NEXT_MASTER 0x2 /* Transition from one master cpu to another */
+#define DCPU_IS_SLAVE    0x4 /* Slave cpu enter exception */
+#define DCPU_SSTEP       0x8 /* CPU is single stepping */
+
+struct debuggerinfo_struct {
+	void			*debuggerinfo;
+	struct task_struct	*task;
+	int			exception_state;
+	int			ret_state;
+	int			irq_depth;
+};
+
+extern struct debuggerinfo_struct kgdb_info[];
+
+/* kernel debug core break point routines */
+extern int dbg_remove_all_break(void);
+extern int dbg_set_sw_break(unsigned long addr);
+extern int dbg_remove_sw_break(unsigned long addr);
+extern int dbg_activate_sw_breakpoints(void);
+extern int dbg_deactivate_sw_breakpoints(void);
+
+/* polled character access to i/o module */
+extern int dbg_io_get_char(void);
+
+/* stub return value for switching between the gdbstub and kdb */
+#define DBG_PASS_EVENT -12345
+/* Switch from one cpu to another */
+#define DBG_SWITCH_CPU_EVENT -123456
+extern int dbg_switch_cpu;
+
+/* gdbstub interface functions */
+extern int gdb_serial_stub(struct kgdb_state *ks);
+extern void gdbstub_msg_write(const char *s, int len);
+
+/* gdbstub functions used for kdb <-> gdbstub transition */
+extern int gdbstub_state(struct kgdb_state *ks, char *cmd);
+extern int dbg_kdb_mode;
+
+#ifdef CONFIG_KGDB_KDB
+extern int kdb_stub(struct kgdb_state *ks);
+extern int kdb_parse(const char *cmdstr);
+#else /* ! CONFIG_KGDB_KDB */
+static inline int kdb_stub(struct kgdb_state *ks)
+{
+	return DBG_PASS_EVENT;
+}
+#endif /* CONFIG_KGDB_KDB */
+
+#endif /* _DEBUG_CORE_H_ */
diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c
new file mode 100644
index 0000000..4b17b32
--- /dev/null
+++ b/kernel/debug/gdbstub.c
@@ -0,0 +1,1017 @@
+/*
+ * Kernel Debug Core
+ *
+ * Maintainer: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (C) 2000-2001 VERITAS Software Corporation.
+ * Copyright (C) 2002-2004 Timesys Corporation
+ * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
+ * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
+ * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
+ * Copyright (C) 2005-2009 Wind River Systems, Inc.
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Contributors at various stages not listed above:
+ *  Jason Wessel ( jason.wessel@windriver.com )
+ *  George Anzinger <george@mvista.com>
+ *  Anurekh Saxena (anurekh.saxena@timesys.com)
+ *  Lake Stevens Instrument Division (Glenn Engel)
+ *  Jim Kingdon, Cygnus Support.
+ *
+ * Original KGDB stub: David Grothe <dave@gcom.com>,
+ * Tigran Aivazian <tigran@sco.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/reboot.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/unaligned.h>
+#include "debug_core.h"
+
+#define KGDB_MAX_THREAD_QUERY 17
+
+/* Our I/O buffers. */
+static char			remcom_in_buffer[BUFMAX];
+static char			remcom_out_buffer[BUFMAX];
+
+/* Storage for the registers, in GDB format. */
+static unsigned long		gdb_regs[(NUMREGBYTES +
+					sizeof(unsigned long) - 1) /
+					sizeof(unsigned long)];
+
+/*
+ * GDB remote protocol parser:
+ */
+
+static int hex(char ch)
+{
+	if ((ch >= 'a') && (ch <= 'f'))
+		return ch - 'a' + 10;
+	if ((ch >= '0') && (ch <= '9'))
+		return ch - '0';
+	if ((ch >= 'A') && (ch <= 'F'))
+		return ch - 'A' + 10;
+	return -1;
+}
+
+#ifdef CONFIG_KGDB_KDB
+static int gdbstub_read_wait(void)
+{
+	int ret = -1;
+	int i;
+
+	/* poll any additional I/O interfaces that are defined */
+	while (ret < 0)
+		for (i = 0; kdb_poll_funcs[i] != NULL; i++) {
+			ret = kdb_poll_funcs[i]();
+			if (ret > 0)
+				break;
+		}
+	return ret;
+}
+#else
+static int gdbstub_read_wait(void)
+{
+	int ret = dbg_io_ops->read_char();
+	while (ret == NO_POLL_CHAR)
+		ret = dbg_io_ops->read_char();
+	return ret;
+}
+#endif
+/* scan for the sequence $<data>#<checksum> */
+static void get_packet(char *buffer)
+{
+	unsigned char checksum;
+	unsigned char xmitcsum;
+	int count;
+	char ch;
+
+	do {
+		/*
+		 * Spin and wait around for the start character, ignore all
+		 * other characters:
+		 */
+		while ((ch = (gdbstub_read_wait())) != '$')
+			/* nothing */;
+
+		kgdb_connected = 1;
+		checksum = 0;
+		xmitcsum = -1;
+
+		count = 0;
+
+		/*
+		 * now, read until a # or end of buffer is found:
+		 */
+		while (count < (BUFMAX - 1)) {
+			ch = gdbstub_read_wait();
+			if (ch == '#')
+				break;
+			checksum = checksum + ch;
+			buffer[count] = ch;
+			count = count + 1;
+		}
+		buffer[count] = 0;
+
+		if (ch == '#') {
+			xmitcsum = hex(gdbstub_read_wait()) << 4;
+			xmitcsum += hex(gdbstub_read_wait());
+
+			if (checksum != xmitcsum)
+				/* failed checksum */
+				dbg_io_ops->write_char('-');
+			else
+				/* successful transfer */
+				dbg_io_ops->write_char('+');
+			if (dbg_io_ops->flush)
+				dbg_io_ops->flush();
+		}
+	} while (checksum != xmitcsum);
+}
+
+/*
+ * Send the packet in buffer.
+ * Check for gdb connection if asked for.
+ */
+static void put_packet(char *buffer)
+{
+	unsigned char checksum;
+	int count;
+	char ch;
+
+	/*
+	 * $<packet info>#<checksum>.
+	 */
+	while (1) {
+		dbg_io_ops->write_char('$');
+		checksum = 0;
+		count = 0;
+
+		while ((ch = buffer[count])) {
+			dbg_io_ops->write_char(ch);
+			checksum += ch;
+			count++;
+		}
+
+		dbg_io_ops->write_char('#');
+		dbg_io_ops->write_char(hex_asc_hi(checksum));
+		dbg_io_ops->write_char(hex_asc_lo(checksum));
+		if (dbg_io_ops->flush)
+			dbg_io_ops->flush();
+
+		/* Now see what we get in reply. */
+		ch = gdbstub_read_wait();
+
+		if (ch == 3)
+			ch = gdbstub_read_wait();
+
+		/* If we get an ACK, we are done. */
+		if (ch == '+')
+			return;
+
+		/*
+		 * If we get the start of another packet, this means
+		 * that GDB is attempting to reconnect.  We will NAK
+		 * the packet being sent, and stop trying to send this
+		 * packet.
+		 */
+		if (ch == '$') {
+			dbg_io_ops->write_char('-');
+			if (dbg_io_ops->flush)
+				dbg_io_ops->flush();
+			return;
+		}
+	}
+}
+
+static char gdbmsgbuf[BUFMAX + 1];
+
+void gdbstub_msg_write(const char *s, int len)
+{
+	char *bufptr;
+	int wcount;
+	int i;
+
+	if (len == 0)
+		len = strlen(s);
+
+	/* 'O'utput */
+	gdbmsgbuf[0] = 'O';
+
+	/* Fill and send buffers... */
+	while (len > 0) {
+		bufptr = gdbmsgbuf + 1;
+
+		/* Calculate how many this time */
+		if ((len << 1) > (BUFMAX - 2))
+			wcount = (BUFMAX - 2) >> 1;
+		else
+			wcount = len;
+
+		/* Pack in hex chars */
+		for (i = 0; i < wcount; i++)
+			bufptr = pack_hex_byte(bufptr, s[i]);
+		*bufptr = '\0';
+
+		/* Move up */
+		s += wcount;
+		len -= wcount;
+
+		/* Write packet */
+		put_packet(gdbmsgbuf);
+	}
+}
+
+/*
+ * Convert the memory pointed to by mem into hex, placing the result
+ * in buf.  Return 0 on success, or an error code if the memory could
+ * not be read.
+ */
+int kgdb_mem2hex(char *mem, char *buf, int count)
+{
+	char *tmp;
+	int err;
+
+	/*
+	 * We use the upper half of buf as an intermediate buffer for the
+	 * raw memory copy.  Hex conversion will work against this one.
+	 */
+	tmp = buf + count;
+
+	err = probe_kernel_read(tmp, mem, count);
+	if (!err) {
+		while (count > 0) {
+			buf = pack_hex_byte(buf, *tmp);
+			tmp++;
+			count--;
+		}
+
+		*buf = 0;
+	}
+
+	return err;
+}
+
+/*
+ * Convert the hex array pointed to by buf into binary and write it to
+ * mem.  Return 0 on success, or an error code if the memory could not
+ * be written.
+ */
+int kgdb_hex2mem(char *buf, char *mem, int count)
+{
+	char *tmp_raw;
+	char *tmp_hex;
+
+	/*
+	 * We use the upper half of buf as an intermediate buffer for the
+	 * raw memory that is converted from hex.
+	 */
+	tmp_raw = buf + count * 2;
+
+	tmp_hex = tmp_raw - 1;
+	while (tmp_hex >= buf) {
+		tmp_raw--;
+		*tmp_raw = hex(*tmp_hex--);
+		*tmp_raw |= hex(*tmp_hex--) << 4;
+	}
+
+	return probe_kernel_write(mem, tmp_raw, count);
+}
+
+/*
+ * While we find nice hex chars, build a long_val.
+ * Return number of chars processed.
+ */
+int kgdb_hex2long(char **ptr, unsigned long *long_val)
+{
+	int hex_val;
+	int num = 0;
+	int negate = 0;
+
+	*long_val = 0;
+
+	if (**ptr == '-') {
+		negate = 1;
+		(*ptr)++;
+	}
+	while (**ptr) {
+		hex_val = hex(**ptr);
+		if (hex_val < 0)
+			break;
+
+		*long_val = (*long_val << 4) | hex_val;
+		num++;
+		(*ptr)++;
+	}
+
+	if (negate)
+		*long_val = -*long_val;
+
+	return num;
+}
+
+/*
+ * Copy the binary array pointed to by buf into mem.  Un-escape $, #, and
+ * 0x7d (which arrive escaped with 0x7d). Return -EFAULT on failure or 0 on
+ * success. The input buf is overwritten with the result to write to mem.
+ */
+static int kgdb_ebin2mem(char *buf, char *mem, int count)
+{
+	int size = 0;
+	char *c = buf;
+
+	while (count-- > 0) {
+		c[size] = *buf++;
+		if (c[size] == 0x7d)
+			c[size] = *buf++ ^ 0x20;
+		size++;
+	}
+
+	return probe_kernel_write(mem, c, size);
+}
+
+/* Write memory due to an 'M' or 'X' packet. */
+static int write_mem_msg(int binary)
+{
+	char *ptr = &remcom_in_buffer[1];
+	unsigned long addr;
+	unsigned long length;
+	int err;
+
+	if (kgdb_hex2long(&ptr, &addr) > 0 && *(ptr++) == ',' &&
+	    kgdb_hex2long(&ptr, &length) > 0 && *(ptr++) == ':') {
+		if (binary)
+			err = kgdb_ebin2mem(ptr, (char *)addr, length);
+		else
+			err = kgdb_hex2mem(ptr, (char *)addr, length);
+		if (err)
+			return err;
+		if (CACHE_FLUSH_IS_SAFE)
+			flush_icache_range(addr, addr + length);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static void error_packet(char *pkt, int error)
+{
+	error = -error;
+	pkt[0] = 'E';
+	pkt[1] = hex_asc[(error / 10)];
+	pkt[2] = hex_asc[(error % 10)];
+	pkt[3] = '\0';
+}
+
+/*
+ * Thread ID accessors. We represent a flat TID space to GDB, where
+ * the per CPU idle threads (which under Linux all have PID 0) are
+ * remapped to negative TIDs.
+ */
+
+#define BUF_THREAD_ID_SIZE	16
+
+static char *pack_threadid(char *pkt, unsigned char *id)
+{
+	char *limit;
+
+	limit = pkt + BUF_THREAD_ID_SIZE;
+	while (pkt < limit)
+		pkt = pack_hex_byte(pkt, *id++);
+
+	return pkt;
+}
+
+static void int_to_threadref(unsigned char *id, int value)
+{
+	unsigned char *scan;
+	int i = 4;
+
+	scan = (unsigned char *)id;
+	while (i--)
+		*scan++ = 0;
+	put_unaligned_be32(value, scan);
+}
+
+static struct task_struct *getthread(struct pt_regs *regs, int tid)
+{
+	/*
+	 * Non-positive TIDs are remapped to the cpu shadow information
+	 */
+	if (tid == 0 || tid == -1)
+		tid = -atomic_read(&kgdb_active) - 2;
+	if (tid < -1 && tid > -NR_CPUS - 2) {
+		if (kgdb_info[-tid - 2].task)
+			return kgdb_info[-tid - 2].task;
+		else
+			return idle_task(-tid - 2);
+	}
+	if (tid <= 0) {
+		printk(KERN_ERR "KGDB: Internal thread select error\n");
+		dump_stack();
+		return NULL;
+	}
+
+	/*
+	 * find_task_by_pid_ns() does not take the tasklist lock anymore
+	 * but is nicely RCU locked - hence is a pretty resilient
+	 * thing to use:
+	 */
+	return find_task_by_pid_ns(tid, &init_pid_ns);
+}
+
+
+/*
+ * Remap normal tasks to their real PID;
+ * CPU shadow threads are mapped to -CPU - 2.
+ */
+static inline int shadow_pid(int realpid)
+{
+	if (realpid)
+		return realpid;
+
+	return -raw_smp_processor_id() - 2;
+}
+
+/*
+ * All the functions that start with gdb_cmd are the various
+ * operations to implement the handlers for the gdbserial protocol
+ * where KGDB is communicating with an external debugger
+ */
+
+/* Handle the '?' status packets */
+static void gdb_cmd_status(struct kgdb_state *ks)
+{
+	/*
+	 * We know that this packet is only sent
+	 * during initial connect.  So to be safe,
+	 * we clear out our breakpoints now in case
+	 * GDB is reconnecting.
+	 */
+	dbg_remove_all_break();
+
+	remcom_out_buffer[0] = 'S';
+	pack_hex_byte(&remcom_out_buffer[1], ks->signo);
+}
+
+/* Handle the 'g' get registers request */
+static void gdb_cmd_getregs(struct kgdb_state *ks)
+{
+	struct task_struct *thread;
+	void *local_debuggerinfo;
+	int i;
+
+	thread = kgdb_usethread;
+	if (!thread) {
+		thread = kgdb_info[ks->cpu].task;
+		local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo;
+	} else {
+		local_debuggerinfo = NULL;
+		for_each_online_cpu(i) {
+			/*
+			 * Try to find the task on some other
+			 * or possibly this node.  If we do not
+			 * find the matching task, then we try
+			 * to approximate the results.
+			 */
+			if (thread == kgdb_info[i].task)
+				local_debuggerinfo = kgdb_info[i].debuggerinfo;
+		}
+	}
+
+	/*
+	 * All threads that don't have debuggerinfo should be
+	 * in schedule() sleeping, since all other CPUs
+	 * are in kgdb_wait, and thus have debuggerinfo.
+	 */
+	if (local_debuggerinfo) {
+		pt_regs_to_gdb_regs(gdb_regs, local_debuggerinfo);
+	} else {
+		/*
+		 * Pull stuff saved during switch_to; nothing
+		 * else is accessible (or even particularly
+		 * relevant).
+		 *
+		 * This should be enough for a stack trace.
+		 */
+		sleeping_thread_to_gdb_regs(gdb_regs, thread);
+	}
+	kgdb_mem2hex((char *)gdb_regs, remcom_out_buffer, NUMREGBYTES);
+}
+
+/* Handle the 'G' set registers request */
+static void gdb_cmd_setregs(struct kgdb_state *ks)
+{
+	kgdb_hex2mem(&remcom_in_buffer[1], (char *)gdb_regs, NUMREGBYTES);
+
+	if (kgdb_usethread && kgdb_usethread != current) {
+		error_packet(remcom_out_buffer, -EINVAL);
+	} else {
+		gdb_regs_to_pt_regs(gdb_regs, ks->linux_regs);
+		strcpy(remcom_out_buffer, "OK");
+	}
+}
+
+/* Handle the 'm' memory read bytes */
+static void gdb_cmd_memread(struct kgdb_state *ks)
+{
+	char *ptr = &remcom_in_buffer[1];
+	unsigned long length;
+	unsigned long addr;
+	int err;
+
+	if (kgdb_hex2long(&ptr, &addr) > 0 && *ptr++ == ',' &&
+					kgdb_hex2long(&ptr, &length) > 0) {
+		err = kgdb_mem2hex((char *)addr, remcom_out_buffer, length);
+		if (err)
+			error_packet(remcom_out_buffer, err);
+	} else {
+		error_packet(remcom_out_buffer, -EINVAL);
+	}
+}
+
+/* Handle the 'M' memory write bytes */
+static void gdb_cmd_memwrite(struct kgdb_state *ks)
+{
+	int err = write_mem_msg(0);
+
+	if (err)
+		error_packet(remcom_out_buffer, err);
+	else
+		strcpy(remcom_out_buffer, "OK");
+}
+
+/* Handle the 'X' memory binary write bytes */
+static void gdb_cmd_binwrite(struct kgdb_state *ks)
+{
+	int err = write_mem_msg(1);
+
+	if (err)
+		error_packet(remcom_out_buffer, err);
+	else
+		strcpy(remcom_out_buffer, "OK");
+}
+
+/* Handle the 'D' or 'k', detach or kill packets */
+static void gdb_cmd_detachkill(struct kgdb_state *ks)
+{
+	int error;
+
+	/* The detach case */
+	if (remcom_in_buffer[0] == 'D') {
+		error = dbg_remove_all_break();
+		if (error < 0) {
+			error_packet(remcom_out_buffer, error);
+		} else {
+			strcpy(remcom_out_buffer, "OK");
+			kgdb_connected = 0;
+		}
+		put_packet(remcom_out_buffer);
+	} else {
+		/*
+		 * Assume the kill case, with no exit code checking,
+		 * trying to force detach the debugger:
+		 */
+		dbg_remove_all_break();
+		kgdb_connected = 0;
+	}
+}
+
+/* Handle the 'R' reboot packets */
+static int gdb_cmd_reboot(struct kgdb_state *ks)
+{
+	/* For now, only honor R0 */
+	if (strcmp(remcom_in_buffer, "R0") == 0) {
+		printk(KERN_CRIT "Executing emergency reboot\n");
+		strcpy(remcom_out_buffer, "OK");
+		put_packet(remcom_out_buffer);
+
+		/*
+		 * Execution should not return from
+		 * machine_emergency_restart()
+		 */
+		machine_emergency_restart();
+		kgdb_connected = 0;
+
+		return 1;
+	}
+	return 0;
+}
+
+/* Handle the 'q' query packets */
+static void gdb_cmd_query(struct kgdb_state *ks)
+{
+	struct task_struct *g;
+	struct task_struct *p;
+	unsigned char thref[8];
+	char *ptr;
+	int i;
+	int cpu;
+	int finished = 0;
+
+	switch (remcom_in_buffer[1]) {
+	case 's':
+	case 'f':
+		if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10)) {
+			error_packet(remcom_out_buffer, -EINVAL);
+			break;
+		}
+
+		i = 0;
+		remcom_out_buffer[0] = 'm';
+		ptr = remcom_out_buffer + 1;
+		if (remcom_in_buffer[1] == 'f') {
+			/* Each cpu is a shadow thread */
+			for_each_online_cpu(cpu) {
+				ks->thr_query = 0;
+				int_to_threadref(thref, -cpu - 2);
+				pack_threadid(ptr, thref);
+				ptr += BUF_THREAD_ID_SIZE;
+				*(ptr++) = ',';
+				i++;
+			}
+		}
+
+		do_each_thread(g, p) {
+			if (i >= ks->thr_query && !finished) {
+				int_to_threadref(thref, p->pid);
+				pack_threadid(ptr, thref);
+				ptr += BUF_THREAD_ID_SIZE;
+				*(ptr++) = ',';
+				ks->thr_query++;
+				if (ks->thr_query % KGDB_MAX_THREAD_QUERY == 0)
+					finished = 1;
+			}
+			i++;
+		} while_each_thread(g, p);
+
+		*(--ptr) = '\0';
+		break;
+
+	case 'C':
+		/* Current thread id */
+		strcpy(remcom_out_buffer, "QC");
+		ks->threadid = shadow_pid(current->pid);
+		int_to_threadref(thref, ks->threadid);
+		pack_threadid(remcom_out_buffer + 2, thref);
+		break;
+	case 'T':
+		if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16)) {
+			error_packet(remcom_out_buffer, -EINVAL);
+			break;
+		}
+		ks->threadid = 0;
+		ptr = remcom_in_buffer + 17;
+		kgdb_hex2long(&ptr, &ks->threadid);
+		if (!getthread(ks->linux_regs, ks->threadid)) {
+			error_packet(remcom_out_buffer, -EINVAL);
+			break;
+		}
+		if ((int)ks->threadid > 0) {
+			kgdb_mem2hex(getthread(ks->linux_regs,
+					ks->threadid)->comm,
+					remcom_out_buffer, 16);
+		} else {
+			static char tmpstr[23 + BUF_THREAD_ID_SIZE];
+
+			sprintf(tmpstr, "shadowCPU%d",
+					(int)(-ks->threadid - 2));
+			kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr));
+		}
+		break;
+#ifdef CONFIG_KGDB_KDB
+	case 'R':
+		if (strncmp(remcom_in_buffer, "qRcmd,", 6) == 0) {
+			int len = strlen(remcom_in_buffer + 6);
+
+			if ((len % 2) != 0) {
+				strcpy(remcom_out_buffer, "E01");
+				break;
+			}
+			kgdb_hex2mem(remcom_in_buffer + 6,
+				     remcom_out_buffer, len);
+			len = len / 2;
+			remcom_out_buffer[len++] = 0;
+
+			kdb_parse(remcom_out_buffer);
+			strcpy(remcom_out_buffer, "OK");
+		}
+		break;
+#endif
+	}
+}
+
+/* Handle the 'H' task query packets */
+static void gdb_cmd_task(struct kgdb_state *ks)
+{
+	struct task_struct *thread;
+	char *ptr;
+
+	switch (remcom_in_buffer[1]) {
+	case 'g':
+		ptr = &remcom_in_buffer[2];
+		kgdb_hex2long(&ptr, &ks->threadid);
+		thread = getthread(ks->linux_regs, ks->threadid);
+		if (!thread && ks->threadid > 0) {
+			error_packet(remcom_out_buffer, -EINVAL);
+			break;
+		}
+		kgdb_usethread = thread;
+		ks->kgdb_usethreadid = ks->threadid;
+		strcpy(remcom_out_buffer, "OK");
+		break;
+	case 'c':
+		ptr = &remcom_in_buffer[2];
+		kgdb_hex2long(&ptr, &ks->threadid);
+		if (!ks->threadid) {
+			kgdb_contthread = NULL;
+		} else {
+			thread = getthread(ks->linux_regs, ks->threadid);
+			if (!thread && ks->threadid > 0) {
+				error_packet(remcom_out_buffer, -EINVAL);
+				break;
+			}
+			kgdb_contthread = thread;
+		}
+		strcpy(remcom_out_buffer, "OK");
+		break;
+	}
+}
+
+/* Handle the 'T' thread query packets */
+static void gdb_cmd_thread(struct kgdb_state *ks)
+{
+	char *ptr = &remcom_in_buffer[1];
+	struct task_struct *thread;
+
+	kgdb_hex2long(&ptr, &ks->threadid);
+	thread = getthread(ks->linux_regs, ks->threadid);
+	if (thread)
+		strcpy(remcom_out_buffer, "OK");
+	else
+		error_packet(remcom_out_buffer, -EINVAL);
+}
+
+/* Handle the 'z' or 'Z' breakpoint remove or set packets */
+static void gdb_cmd_break(struct kgdb_state *ks)
+{
+	/*
+	 * Since GDB-5.3, it's been drafted that '0' is a software
+	 * breakpoint, '1' is a hardware breakpoint, so let's do that.
+	 */
+	char *bpt_type = &remcom_in_buffer[1];
+	char *ptr = &remcom_in_buffer[2];
+	unsigned long addr;
+	unsigned long length;
+	int error = 0;
+
+	if (arch_kgdb_ops.set_hw_breakpoint && *bpt_type >= '1') {
+		/* Unsupported */
+		if (*bpt_type > '4')
+			return;
+	} else {
+		if (*bpt_type != '0' && *bpt_type != '1')
+			/* Unsupported. */
+			return;
+	}
+
+	/*
+	 * Test if this is a hardware breakpoint, and
+	 * if we support it:
+	 */
+	if (*bpt_type == '1' && !(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT))
+		/* Unsupported. */
+		return;
+
+	if (*(ptr++) != ',') {
+		error_packet(remcom_out_buffer, -EINVAL);
+		return;
+	}
+	if (!kgdb_hex2long(&ptr, &addr)) {
+		error_packet(remcom_out_buffer, -EINVAL);
+		return;
+	}
+	if (*(ptr++) != ',' ||
+		!kgdb_hex2long(&ptr, &length)) {
+		error_packet(remcom_out_buffer, -EINVAL);
+		return;
+	}
+
+	if (remcom_in_buffer[0] == 'Z' && *bpt_type == '0')
+		error = dbg_set_sw_break(addr);
+	else if (remcom_in_buffer[0] == 'z' && *bpt_type == '0')
+		error = dbg_remove_sw_break(addr);
+	else if (remcom_in_buffer[0] == 'Z')
+		error = arch_kgdb_ops.set_hw_breakpoint(addr,
+			(int)length, *bpt_type - '0');
+	else if (remcom_in_buffer[0] == 'z')
+		error = arch_kgdb_ops.remove_hw_breakpoint(addr,
+			(int) length, *bpt_type - '0');
+
+	if (error == 0)
+		strcpy(remcom_out_buffer, "OK");
+	else
+		error_packet(remcom_out_buffer, error);
+}
+
+/* Handle the 'C' signal / exception passing packets */
+static int gdb_cmd_exception_pass(struct kgdb_state *ks)
+{
+	/* C09 == pass exception
+	 * C15 == detach kgdb, pass exception
+	 */
+	if (remcom_in_buffer[1] == '0' && remcom_in_buffer[2] == '9') {
+
+		ks->pass_exception = 1;
+		remcom_in_buffer[0] = 'c';
+
+	} else if (remcom_in_buffer[1] == '1' && remcom_in_buffer[2] == '5') {
+
+		ks->pass_exception = 1;
+		remcom_in_buffer[0] = 'D';
+		dbg_remove_all_break();
+		kgdb_connected = 0;
+		return 1;
+
+	} else {
+		gdbstub_msg_write("KGDB only knows signal 9 (pass)"
+			" and 15 (pass and disconnect)\n"
+			"Executing a continue without signal passing\n", 0);
+		remcom_in_buffer[0] = 'c';
+	}
+
+	/* Indicate fall through */
+	return -1;
+}
+
+/*
+ * This function performs all gdbserial command processing
+ */
+int gdb_serial_stub(struct kgdb_state *ks)
+{
+	int error = 0;
+	int tmp;
+
+	/* Clear the out buffer. */
+	memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));
+
+	if (kgdb_connected) {
+		unsigned char thref[8];
+		char *ptr;
+
+		/* Reply to host that an exception has occurred */
+		ptr = remcom_out_buffer;
+		*ptr++ = 'T';
+		ptr = pack_hex_byte(ptr, ks->signo);
+		ptr += strlen(strcpy(ptr, "thread:"));
+		int_to_threadref(thref, shadow_pid(current->pid));
+		ptr = pack_threadid(ptr, thref);
+		*ptr++ = ';';
+		put_packet(remcom_out_buffer);
+	}
+
+	kgdb_usethread = kgdb_info[ks->cpu].task;
+	ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid);
+	ks->pass_exception = 0;
+
+	while (1) {
+		error = 0;
+
+		/* Clear the out buffer. */
+		memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));
+
+		get_packet(remcom_in_buffer);
+
+		switch (remcom_in_buffer[0]) {
+		case '?': /* gdbserial status */
+			gdb_cmd_status(ks);
+			break;
+		case 'g': /* return the value of the CPU registers */
+			gdb_cmd_getregs(ks);
+			break;
+		case 'G': /* set the value of the CPU registers - return OK */
+			gdb_cmd_setregs(ks);
+			break;
+		case 'm': /* mAA..AA,LLLL  Read LLLL bytes at address AA..AA */
+			gdb_cmd_memread(ks);
+			break;
+		case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA..AA */
+			gdb_cmd_memwrite(ks);
+			break;
+		case 'X': /* XAA..AA,LLLL: Write LLLL bytes at address AA..AA */
+			gdb_cmd_binwrite(ks);
+			break;
+			/* kill or detach. KGDB should treat this like a
+			 * continue.
+			 */
+		case 'D': /* Debugger detach */
+		case 'k': /* Debugger detach via kill */
+			gdb_cmd_detachkill(ks);
+			goto default_handle;
+		case 'R': /* Reboot */
+			if (gdb_cmd_reboot(ks))
+				goto default_handle;
+			break;
+		case 'q': /* query command */
+			gdb_cmd_query(ks);
+			break;
+		case 'H': /* task related */
+			gdb_cmd_task(ks);
+			break;
+		case 'T': /* Query thread status */
+			gdb_cmd_thread(ks);
+			break;
+		case 'z': /* Break point remove */
+		case 'Z': /* Break point set */
+			gdb_cmd_break(ks);
+			break;
+#ifdef CONFIG_KGDB_KDB
+		case '3': /* Escape back into kdb */
+			if (remcom_in_buffer[1] == '\0') {
+				gdb_cmd_detachkill(ks);
+				return DBG_PASS_EVENT;
+			}
+#endif
+		case 'C': /* Exception passing */
+			tmp = gdb_cmd_exception_pass(ks);
+			if (tmp > 0)
+				goto default_handle;
+			if (tmp == 0)
+				break;
+			/* Fall through on tmp < 0 */
+		case 'c': /* Continue packet */
+		case 's': /* Single step packet */
+			if (kgdb_contthread && kgdb_contthread != current) {
+				/* Can't switch threads in kgdb */
+				error_packet(remcom_out_buffer, -EINVAL);
+				break;
+			}
+			dbg_activate_sw_breakpoints();
+			/* Fall through to default processing */
+		default:
+default_handle:
+			error = kgdb_arch_handle_exception(ks->ex_vector,
+						ks->signo,
+						ks->err_code,
+						remcom_in_buffer,
+						remcom_out_buffer,
+						ks->linux_regs);
+			/*
+			 * Leave cmd processing on error, detach,
+			 * kill, continue, or single step.
+			 */
+			if (error >= 0 || remcom_in_buffer[0] == 'D' ||
+			    remcom_in_buffer[0] == 'k') {
+				error = 0;
+				goto kgdb_exit;
+			}
+
+		}
+
+		/* reply to the request */
+		put_packet(remcom_out_buffer);
+	}
+
+kgdb_exit:
+	if (ks->pass_exception)
+		error = 1;
+	return error;
+}
+
+int gdbstub_state(struct kgdb_state *ks, char *cmd)
+{
+	int error;
+
+	switch (cmd[0]) {
+	case 'e':
+		error = kgdb_arch_handle_exception(ks->ex_vector,
+						   ks->signo,
+						   ks->err_code,
+						   remcom_in_buffer,
+						   remcom_out_buffer,
+						   ks->linux_regs);
+		return error;
+	case 's':
+	case 'c':
+		strcpy(remcom_in_buffer, cmd);
+		return 0;
+	case '?':
+		gdb_cmd_status(ks);
+		break;
+	case '\0':
+		strcpy(remcom_out_buffer, "");
+		break;
+	}
+	dbg_io_ops->write_char('+');
+	put_packet(remcom_out_buffer);
+	return 0;
+}
diff --git a/kernel/debug/kdb/.gitignore b/kernel/debug/kdb/.gitignore
new file mode 100644
index 0000000..396d12e
--- /dev/null
+++ b/kernel/debug/kdb/.gitignore
@@ -0,0 +1 @@
+gen-kdb_cmds.c
diff --git a/kernel/debug/kdb/Makefile b/kernel/debug/kdb/Makefile
new file mode 100644
index 0000000..d4fc58f
--- /dev/null
+++ b/kernel/debug/kdb/Makefile
@@ -0,0 +1,25 @@
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (c) 1999-2004 Silicon Graphics, Inc.  All Rights Reserved.
+# Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+#
+
+CCVERSION	:= $(shell $(CC) -v 2>&1 | sed -ne '$$p')
+obj-y := kdb_io.o kdb_main.o kdb_support.o kdb_bt.o gen-kdb_cmds.o kdb_bp.o kdb_debugger.o
+obj-$(CONFIG_KDB_KEYBOARD)    += kdb_keyboard.o
+
+clean-files := gen-kdb_cmds.c
+
+quiet_cmd_gen-kdb = GENKDB  $@
+      cmd_gen-kdb = $(AWK) 'BEGIN {print "\#include <linux/stddef.h>"; print "\#include <linux/init.h>"} \
+		/^\#/{next} \
+		/^[ \t]*$$/{next} \
+		{gsub(/"/, "\\\"", $$0); \
+		  print "static __initdata char kdb_cmd" cmds++ "[] = \"" $$0 "\\n\";"} \
+		END {print "extern char *kdb_cmds[]; char __initdata *kdb_cmds[] = {"; for (i = 0; i < cmds; ++i) {print "  kdb_cmd" i ","}; print("  NULL\n};");}' \
+		$(filter-out %/Makefile,$^) > $@#
+
+$(obj)/gen-kdb_cmds.c:	$(src)/kdb_cmds $(src)/Makefile
+	$(call cmd,gen-kdb)
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
new file mode 100644
index 0000000..75bd9b3
--- /dev/null
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -0,0 +1,564 @@
+/*
+ * Kernel Debugger Architecture Independent Breakpoint Handler
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ */
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kdb.h>
+#include <linux/kgdb.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include "kdb_private.h"
+
+/*
+ * Table of kdb_breakpoints
+ */
+kdb_bp_t kdb_breakpoints[KDB_MAXBPT];
+
+static void kdb_setsinglestep(struct pt_regs *regs)
+{
+	KDB_STATE_SET(DOING_SS);
+}
+
+static char *kdb_rwtypes[] = {
+	"Instruction(i)",
+	"Instruction(Register)",
+	"Data Write",
+	"I/O",
+	"Data Access"
+};
+
+static char *kdb_bptype(kdb_bp_t *bp)
+{
+	if (bp->bp_type < 0 || bp->bp_type > 4)
+		return "";
+
+	return kdb_rwtypes[bp->bp_type];
+}
+
+static int kdb_parsebp(int argc, const char **argv, int *nextargp, kdb_bp_t *bp)
+{
+	int nextarg = *nextargp;
+	int diag;
+
+	bp->bph_length = 1;
+	if ((argc + 1) != nextarg) {
+		if (strnicmp(argv[nextarg], "datar", sizeof("datar")) == 0)
+			bp->bp_type = BP_ACCESS_WATCHPOINT;
+		else if (strnicmp(argv[nextarg], "dataw", sizeof("dataw")) == 0)
+			bp->bp_type = BP_WRITE_WATCHPOINT;
+		else if (strnicmp(argv[nextarg], "inst", sizeof("inst")) == 0)
+			bp->bp_type = BP_HARDWARE_BREAKPOINT;
+		else
+			return KDB_ARGCOUNT;
+
+		bp->bph_length = 1;
+
+		nextarg++;
+
+		if ((argc + 1) != nextarg) {
+			unsigned long len;
+
+			diag = kdbgetularg((char *)argv[nextarg],
+					   &len);
+			if (diag)
+				return diag;
+
+
+			if (len > 8)
+				return KDB_BADLENGTH;
+
+			bp->bph_length = len;
+			nextarg++;
+		}
+
+		if ((argc + 1) != nextarg)
+			return KDB_ARGCOUNT;
+	}
+
+	*nextargp = nextarg;
+	return 0;
+}
+
+static int _kdb_bp_remove(kdb_bp_t *bp)
+{
+	int ret = 1;
+	if (!bp->bp_installed)
+		return ret;
+	if (!bp->bp_type)
+		ret = dbg_remove_sw_break(bp->bp_addr);
+	else
+		ret = arch_kgdb_ops.remove_hw_breakpoint(bp->bp_addr,
+			 bp->bph_length,
+			 bp->bp_type);
+	if (ret == 0)
+		bp->bp_installed = 0;
+	return ret;
+}
+
+static void kdb_handle_bp(struct pt_regs *regs, kdb_bp_t *bp)
+{
+	if (KDB_DEBUG(BP))
+		kdb_printf("regs->ip = 0x%lx\n", instruction_pointer(regs));
+
+	/*
+	 * Setup single step
+	 */
+	kdb_setsinglestep(regs);
+
+	/*
+	 * Reset delay attribute
+	 */
+	bp->bp_delay = 0;
+	bp->bp_delayed = 1;
+}
+
+static int _kdb_bp_install(struct pt_regs *regs, kdb_bp_t *bp)
+{
+	int ret;
+	/*
+	 * Install the breakpoint, if it is not already installed.
+	 */
+
+	if (KDB_DEBUG(BP))
+		kdb_printf("%s: bp_installed %d\n",
+			   __func__, bp->bp_installed);
+	if (!KDB_STATE(SSBPT))
+		bp->bp_delay = 0;
+	if (bp->bp_installed)
+		return 1;
+	if (bp->bp_delay || (bp->bp_delayed && KDB_STATE(DOING_SS))) {
+		if (KDB_DEBUG(BP))
+			kdb_printf("%s: delayed bp\n", __func__);
+		kdb_handle_bp(regs, bp);
+		return 0;
+	}
+	if (!bp->bp_type)
+		ret = dbg_set_sw_break(bp->bp_addr);
+	else
+		ret = arch_kgdb_ops.set_hw_breakpoint(bp->bp_addr,
+			 bp->bph_length,
+			 bp->bp_type);
+	if (ret == 0) {
+		bp->bp_installed = 1;
+	} else {
+		kdb_printf("%s: failed to set breakpoint at 0x%lx\n",
+			   __func__, bp->bp_addr);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * kdb_bp_install
+ *
+ *	Install kdb_breakpoints prior to returning from the
+ *	kernel debugger.  This allows the kdb_breakpoints to be set
+ *	upon functions that are used internally by kdb, such as
+ *	printk().  This function is only called once per kdb session.
+ */
+void kdb_bp_install(struct pt_regs *regs)
+{
+	int i;
+
+	for (i = 0; i < KDB_MAXBPT; i++) {
+		kdb_bp_t *bp = &kdb_breakpoints[i];
+
+		if (KDB_DEBUG(BP)) {
+			kdb_printf("%s: bp %d bp_enabled %d\n",
+				   __func__, i, bp->bp_enabled);
+		}
+		if (bp->bp_enabled)
+			_kdb_bp_install(regs, bp);
+	}
+}
+
+/*
+ * kdb_bp_remove
+ *
+ *	Remove kdb_breakpoints upon entry to the kernel debugger.
+ *
+ * Parameters:
+ *	None.
+ * Outputs:
+ *	None.
+ * Returns:
+ *	None.
+ * Locking:
+ *	None.
+ * Remarks:
+ */
+void kdb_bp_remove(void)
+{
+	int i;
+
+	for (i = KDB_MAXBPT - 1; i >= 0; i--) {
+		kdb_bp_t *bp = &kdb_breakpoints[i];
+
+		if (KDB_DEBUG(BP)) {
+			kdb_printf("%s: bp %d bp_enabled %d\n",
+				   __func__, i, bp->bp_enabled);
+		}
+		if (bp->bp_enabled)
+			_kdb_bp_remove(bp);
+	}
+}
+
+
+/*
+ * kdb_printbp
+ *
+ *	Internal function to format and print a breakpoint entry.
+ *
+ * Parameters:
+ *	bp	Breakpoint structure to print.
+ *	i	Breakpoint number.
+ * Outputs:
+ *	None.
+ * Returns:
+ *	None.
+ * Locking:
+ *	None.
+ * Remarks:
+ */
+
+static void kdb_printbp(kdb_bp_t *bp, int i)
+{
+	kdb_printf("%s ", kdb_bptype(bp));
+	kdb_printf("BP #%d at ", i);
+	kdb_symbol_print(bp->bp_addr, NULL, KDB_SP_DEFAULT);
+
+	if (bp->bp_enabled)
+		kdb_printf("\n    is enabled");
+	else
+		kdb_printf("\n    is disabled");
+
+	kdb_printf("\taddr at %016lx, hardtype=%d installed=%d\n",
+		   bp->bp_addr, bp->bp_type, bp->bp_installed);
+
+	kdb_printf("\n");
+}
+
+/*
+ * kdb_bp
+ *
+ *	Handle the bp commands.
+ *
+ *	[bp|bph] <addr-expression> [DATAR|DATAW]
+ *
+ * Parameters:
+ *	argc	Count of arguments in argv
+ *	argv	Space delimited command line arguments
+ * Outputs:
+ *	None.
+ * Returns:
+ *	Zero for success, a kdb diagnostic if failure.
+ * Locking:
+ *	None.
+ * Remarks:
+ *
+ *	bp	Set breakpoint on all cpus.  Only use hardware assist if needed.
+ *	bph	Set breakpoint on all cpus.  Force use of a hardware register.
+ */
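+
+/*
+ * Illustrative invocations at the kdb prompt (the symbol name and the
+ * length below are examples only):
+ *
+ *	bp sys_sync		software breakpoint on sys_sync
+ *	bph sys_sync dataw 4	hardware breakpoint on writes to 4 bytes
+ *				at sys_sync (where hardware assist exists)
+ *	bp			with no arguments, display the breakpoint
+ *				table
+ */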
+
+static int kdb_bp(int argc, const char **argv)
+{
+	int i, bpno;
+	kdb_bp_t *bp, *bp_check;
+	int diag;
+	int free;
+	char *symname = NULL;
+	long offset = 0ul;
+	int nextarg;
+	kdb_bp_t template = {0};
+
+	if (argc == 0) {
+		/*
+		 * Display breakpoint table
+		 */
+		for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT;
+		     bpno++, bp++) {
+			if (bp->bp_free)
+				continue;
+			kdb_printbp(bp, bpno);
+		}
+
+		return 0;
+	}
+
+	nextarg = 1;
+	diag = kdbgetaddrarg(argc, argv, &nextarg, &template.bp_addr,
+			     &offset, &symname);
+	if (diag)
+		return diag;
+	if (!template.bp_addr)
+		return KDB_BADINT;
+
+	/*
+	 * Find an empty bp structure to allocate
+	 */
+	free = KDB_MAXBPT;
+	for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
+		if (bp->bp_free)
+			break;
+	}
+
+	if (bpno == KDB_MAXBPT)
+		return KDB_TOOMANYBPT;
+
+	if (strcmp(argv[0], "bph") == 0) {
+		template.bp_type = BP_HARDWARE_BREAKPOINT;
+		diag = kdb_parsebp(argc, argv, &nextarg, &template);
+		if (diag)
+			return diag;
+	} else {
+		template.bp_type = BP_BREAKPOINT;
+	}
+
+	/*
+	 * Check for clashing breakpoints.
+	 *
+	 * Note, in this design we can't have hardware breakpoints
+	 * enabled for both read and write on the same address.
+	 */
+	for (i = 0, bp_check = kdb_breakpoints; i < KDB_MAXBPT;
+	     i++, bp_check++) {
+		if (!bp_check->bp_free &&
+		    bp_check->bp_addr == template.bp_addr) {
+			kdb_printf("You already have a breakpoint at "
+				   kdb_bfd_vma_fmt0 "\n", template.bp_addr);
+			return KDB_DUPBPT;
+		}
+	}
+
+	template.bp_enabled = 1;
+
+	/*
+	 * Actually allocate the breakpoint found earlier
+	 */
+	*bp = template;
+	bp->bp_free = 0;
+
+	kdb_printbp(bp, bpno);
+
+	return 0;
+}
+
+/*
+ * kdb_bc
+ *
+ *	Handles the 'bc', 'be', and 'bd' commands
+ *
+ *	[bd|bc|be] <breakpoint-number>
+ *	[bd|bc|be] *
+ *
+ * Parameters:
+ *	argc	Count of arguments in argv
+ *	argv	Space delimited command line arguments
+ * Outputs:
+ *	None.
+ * Returns:
+ *	Zero for success, a kdb diagnostic for failure
+ * Locking:
+ *	None.
+ * Remarks:
+ */
+static int kdb_bc(int argc, const char **argv)
+{
+	unsigned long addr;
+	kdb_bp_t *bp = NULL;
+	int lowbp = KDB_MAXBPT;
+	int highbp = 0;
+	int done = 0;
+	int i;
+	int diag = 0;
+
+	int cmd;			/* KDBCMD_B? */
+#define KDBCMD_BC	0
+#define KDBCMD_BE	1
+#define KDBCMD_BD	2
+
+	if (strcmp(argv[0], "be") == 0)
+		cmd = KDBCMD_BE;
+	else if (strcmp(argv[0], "bd") == 0)
+		cmd = KDBCMD_BD;
+	else
+		cmd = KDBCMD_BC;
+
+	if (argc != 1)
+		return KDB_ARGCOUNT;
+
+	if (strcmp(argv[1], "*") == 0) {
+		lowbp = 0;
+		highbp = KDB_MAXBPT;
+	} else {
+		diag = kdbgetularg(argv[1], &addr);
+		if (diag)
+			return diag;
+
+		/*
+		 * For addresses less than the maximum breakpoint number,
+		 * assume that the breakpoint number is desired.
+		 */
+		if (addr < KDB_MAXBPT) {
+			bp = &kdb_breakpoints[addr];
+			lowbp = highbp = addr;
+			highbp++;
+		} else {
+			for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT;
+			    i++, bp++) {
+				if (bp->bp_addr == addr) {
+					lowbp = highbp = i;
+					highbp++;
+					break;
+				}
+			}
+		}
+	}
+
+	/*
+	 * Now operate on the set of breakpoints matching the input
+	 * criteria (either '*' for all, or an individual breakpoint).
+	 */
+	for (bp = &kdb_breakpoints[lowbp], i = lowbp;
+	    i < highbp;
+	    i++, bp++) {
+		if (bp->bp_free)
+			continue;
+
+		done++;
+
+		switch (cmd) {
+		case KDBCMD_BC:
+			bp->bp_enabled = 0;
+
+			kdb_printf("Breakpoint %d at "
+				   kdb_bfd_vma_fmt " cleared\n",
+				   i, bp->bp_addr);
+
+			bp->bp_addr = 0;
+			bp->bp_free = 1;
+
+			break;
+		case KDBCMD_BE:
+			bp->bp_enabled = 1;
+
+			kdb_printf("Breakpoint %d at "
+				   kdb_bfd_vma_fmt " enabled",
+				   i, bp->bp_addr);
+
+			kdb_printf("\n");
+			break;
+		case KDBCMD_BD:
+			if (!bp->bp_enabled)
+				break;
+
+			bp->bp_enabled = 0;
+
+			kdb_printf("Breakpoint %d at "
+				   kdb_bfd_vma_fmt " disabled\n",
+				   i, bp->bp_addr);
+
+			break;
+		}
+		if (bp->bp_delay && (cmd == KDBCMD_BC || cmd == KDBCMD_BD)) {
+			bp->bp_delay = 0;
+			KDB_STATE_CLEAR(SSBPT);
+		}
+	}
+
+	return (!done) ? KDB_BPTNOTFOUND : 0;
+}
+
+/*
+ * kdb_ss
+ *
+ *	Process the 'ss' (Single Step) and 'ssb' (Single Step to Branch)
+ *	commands.
+ *
+ *	ss
+ *	ssb
+ *
+ * Parameters:
+ *	argc	Argument count
+ *	argv	Argument vector
+ * Outputs:
+ *	None.
+ * Returns:
+ *	KDB_CMD_SS[B] for success, a kdb error if failure.
+ * Locking:
+ *	None.
+ * Remarks:
+ *
+ *	Set the arch specific option to trigger a debug trap after the next
+ *	instruction.
+ *
+ *	For 'ssb', set the trace flag in the debug trap handler
+ *	after printing the current insn and return directly without
+ *	invoking the kdb command processor, until a branch instruction
+ *	is encountered.
+ */
+
+static int kdb_ss(int argc, const char **argv)
+{
+	int ssb = 0;
+
+	ssb = (strcmp(argv[0], "ssb") == 0);
+	if (argc != 0)
+		return KDB_ARGCOUNT;
+	/*
+	 * Set trace flag and go.
+	 */
+	KDB_STATE_SET(DOING_SS);
+	if (ssb) {
+		KDB_STATE_SET(DOING_SSB);
+		return KDB_CMD_SSB;
+	}
+	return KDB_CMD_SS;
+}
+
+/* Initialize the breakpoint table and register breakpoint commands. */
+
+void __init kdb_initbptab(void)
+{
+	int i;
+	kdb_bp_t *bp;
+
+	/*
+	 * First time initialization.
+	 */
+	memset(&kdb_breakpoints, '\0', sizeof(kdb_breakpoints));
+
+	for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++)
+		bp->bp_free = 1;
+
+	kdb_register_repeat("bp", kdb_bp, "[<vaddr>]",
+		"Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
+	kdb_register_repeat("bl", kdb_bp, "[<vaddr>]",
+		"Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
+	if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)
+		kdb_register_repeat("bph", kdb_bp, "[<vaddr>]",
+		"[datar [length]|dataw [length]]   Set hw brk", 0, KDB_REPEAT_NO_ARGS);
+	kdb_register_repeat("bc", kdb_bc, "<bpnum>",
+		"Clear Breakpoint", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("be", kdb_bc, "<bpnum>",
+		"Enable Breakpoint", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("bd", kdb_bc, "<bpnum>",
+		"Disable Breakpoint", 0, KDB_REPEAT_NONE);
+
+	kdb_register_repeat("ss", kdb_ss, "",
+		"Single Step", 1, KDB_REPEAT_NO_ARGS);
+	kdb_register_repeat("ssb", kdb_ss, "",
+		"Single step to branch/call", 0, KDB_REPEAT_NO_ARGS);
+	/*
+	 * Architecture dependent initialization.
+	 */
+}
diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
new file mode 100644
index 0000000..2f62fe8
--- /dev/null
+++ b/kernel/debug/kdb/kdb_bt.c
@@ -0,0 +1,210 @@
+/*
+ * Kernel Debugger Architecture Independent Stack Traceback
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ */
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/kdb.h>
+#include <linux/nmi.h>
+#include <asm/system.h>
+#include "kdb_private.h"
+
+
+static void kdb_show_stack(struct task_struct *p, void *addr)
+{
+	int old_lvl = console_loglevel;
+	console_loglevel = 15;
+	kdb_trap_printk++;
+	kdb_set_current_task(p);
+	if (addr) {
+		show_stack((struct task_struct *)p, addr);
+	} else if (kdb_current_regs) {
+#ifdef CONFIG_X86
+		show_stack(p, &kdb_current_regs->sp);
+#else
+		show_stack(p, NULL);
+#endif
+	} else {
+		show_stack(p, NULL);
+	}
+	console_loglevel = old_lvl;
+	kdb_trap_printk--;
+}
+
+/*
+ * kdb_bt
+ *
+ *	This function implements the 'bt' command.  Print a stack
+ *	traceback.
+ *
+ *	bt [<address-expression>]	(addr-exp is for alternate stacks)
+ *	btp <pid>			Kernel stack for <pid>
+ *	btt <address-expression>	Kernel stack for task structure at
+ *					<address-expression>
+ *	bta [DRSTCZEUIMA]		All useful processes, optionally
+ *					filtered by state
+ *	btc [<cpu>]			The current process on one cpu,
+ *					default is all cpus
+ *
+ *	bt <address-expression> refers to an address on the stack; that
+ *	location is assumed to contain a return address.
+ *
+ *	btt <address-expression> refers to the address of a struct task.
+ *
+ * Inputs:
+ *	argc	argument count
+ *	argv	argument vector
+ * Outputs:
+ *	None.
+ * Returns:
+ *	zero for success, a kdb diagnostic if error
+ * Locking:
+ *	none.
+ * Remarks:
+ *	Backtrace works best when the code uses frame pointers.  But even
+ *	without frame pointers we should get a reasonable trace.
+ *
+ *	mds comes in handy when examining the stack to do a manual traceback or
+ *	to get a starting point for bt <address-expression>.
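+ *
+ *	Illustrative invocations (the pid, cpu number and state letter are
+ *	examples only):
+ *	    bt			traceback of the current process
+ *	    btp 1		traceback for pid 1
+ *	    btc 2		traceback of the current process on cpu 2
+ *	    bta R		traceback of all tasks in the running state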
+ */
+
+static int
+kdb_bt1(struct task_struct *p, unsigned long mask,
+	int argcount, int btaprompt)
+{
+	char buffer[2];
+	if (kdb_getarea(buffer[0], (unsigned long)p) ||
+	    kdb_getarea(buffer[0], (unsigned long)(p+1)-1))
+		return KDB_BADADDR;
+	if (!kdb_task_state(p, mask))
+		return 0;
+	kdb_printf("Stack traceback for pid %d\n", p->pid);
+	kdb_ps1(p);
+	kdb_show_stack(p, NULL);
+	if (btaprompt) {
+		kdb_getstr(buffer, sizeof(buffer),
+			   "Enter <q> to end, <cr> to continue:");
+		if (buffer[0] == 'q') {
+			kdb_printf("\n");
+			return 1;
+		}
+	}
+	touch_nmi_watchdog();
+	return 0;
+}
+
+int
+kdb_bt(int argc, const char **argv)
+{
+	int diag;
+	int argcount = 5;
+	int btaprompt = 1;
+	int nextarg;
+	unsigned long addr;
+	long offset;
+
+	kdbgetintenv("BTARGS", &argcount);	/* Arguments to print */
+	kdbgetintenv("BTAPROMPT", &btaprompt);	/* Prompt after each
+						 * proc in bta */
+
+	if (strcmp(argv[0], "bta") == 0) {
+		struct task_struct *g, *p;
+		unsigned long cpu;
+		unsigned long mask = kdb_task_state_string(argc ? argv[1] :
+							   NULL);
+		if (argc == 0)
+			kdb_ps_suppressed();
+		/* Run the active tasks first */
+		for_each_online_cpu(cpu) {
+			p = kdb_curr_task(cpu);
+			if (kdb_bt1(p, mask, argcount, btaprompt))
+				return 0;
+		}
+		/* Now the inactive tasks */
+		kdb_do_each_thread(g, p) {
+			if (task_curr(p))
+				continue;
+			if (kdb_bt1(p, mask, argcount, btaprompt))
+				return 0;
+		} kdb_while_each_thread(g, p);
+	} else if (strcmp(argv[0], "btp") == 0) {
+		struct task_struct *p;
+		unsigned long pid;
+		if (argc != 1)
+			return KDB_ARGCOUNT;
+		diag = kdbgetularg((char *)argv[1], &pid);
+		if (diag)
+			return diag;
+		p = find_task_by_pid_ns(pid, &init_pid_ns);
+		if (p) {
+			kdb_set_current_task(p);
+			return kdb_bt1(p, ~0UL, argcount, 0);
+		}
+		kdb_printf("No process with pid == %ld found\n", pid);
+		return 0;
+	} else if (strcmp(argv[0], "btt") == 0) {
+		if (argc != 1)
+			return KDB_ARGCOUNT;
+		diag = kdbgetularg((char *)argv[1], &addr);
+		if (diag)
+			return diag;
+		kdb_set_current_task((struct task_struct *)addr);
+		return kdb_bt1((struct task_struct *)addr, ~0UL, argcount, 0);
+	} else if (strcmp(argv[0], "btc") == 0) {
+		unsigned long cpu = ~0;
+		struct task_struct *save_current_task = kdb_current_task;
+		char buf[80];
+		if (argc > 1)
+			return KDB_ARGCOUNT;
+		if (argc == 1) {
+			diag = kdbgetularg((char *)argv[1], &cpu);
+			if (diag)
+				return diag;
+		}
+		/* Recursive use of kdb_parse, do not use argv after
+		 * this point */
+		argv = NULL;
+		if (cpu != ~0) {
+			if (cpu >= num_possible_cpus() || !cpu_online(cpu)) {
+				kdb_printf("no process for cpu %ld\n", cpu);
+				return 0;
+			}
+			sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
+			kdb_parse(buf);
+			return 0;
+		}
+		kdb_printf("btc: cpu status: ");
+		kdb_parse("cpu\n");
+		for_each_online_cpu(cpu) {
+			sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
+			kdb_parse(buf);
+			touch_nmi_watchdog();
+		}
+		kdb_set_current_task(save_current_task);
+		return 0;
+	} else {
+		if (argc) {
+			nextarg = 1;
+			diag = kdbgetaddrarg(argc, argv, &nextarg, &addr,
+					     &offset, NULL);
+			if (diag)
+				return diag;
+			kdb_show_stack(kdb_current_task, (void *)addr);
+			return 0;
+		} else {
+			return kdb_bt1(kdb_current_task, ~0UL, argcount, 0);
+		}
+	}
+
+	/* NOTREACHED */
+	return 0;
+}
diff --git a/kernel/debug/kdb/kdb_cmds b/kernel/debug/kdb/kdb_cmds
new file mode 100644
index 0000000..56c88e4
--- /dev/null
+++ b/kernel/debug/kdb/kdb_cmds
@@ -0,0 +1,35 @@
+# Initial commands for kdb, alter to suit your needs.
+# These commands are executed in kdb_init() context, no SMP, no
+# processes.  Commands that require process data (including stack or
+# registers) are not reliable this early.  set and bp commands should
+# be safe.  Global breakpoint commands affect each cpu as it is booted.
+
+# Standard debugging information for first level support, just type archkdb
+# or archkdbcpu or archkdbshort at the kdb prompt.
+
+defcmd dumpcommon "" "Common kdb debugging"
+  set BTAPROMPT 0
+  set LINES 10000
+  -summary
+  -cpu
+  -ps
+  -dmesg 600
+  -bt
+endefcmd
+
+defcmd dumpall "" "First line debugging"
+  set BTSYMARG 1
+  set BTARGS 9
+  pid R
+  -dumpcommon
+  -bta
+endefcmd
+
+defcmd dumpcpu "" "Same as dumpall but only tasks on cpus"
+  set BTSYMARG 1
+  set BTARGS 9
+  pid R
+  -dumpcommon
+  -btc
+endefcmd
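+
+# Example only (left commented out): additional macros can be defined the
+# same way, combining any of the commands above.
+#defcmd mystatus "" "Brief status summary"
+#  -summary
+#  -cpu
+#endefcmd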
+
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
new file mode 100644
index 0000000..bf6e827
--- /dev/null
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -0,0 +1,169 @@
+/*
+ * Created by: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/kdebug.h>
+#include "kdb_private.h"
+#include "../debug_core.h"
+
+/*
+ * KDB interface to KGDB internals
+ */
+get_char_func kdb_poll_funcs[] = {
+	dbg_io_get_char,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+};
+EXPORT_SYMBOL_GPL(kdb_poll_funcs);
+
+int kdb_poll_idx = 1;
+EXPORT_SYMBOL_GPL(kdb_poll_idx);
+
+int kdb_stub(struct kgdb_state *ks)
+{
+	int error = 0;
+	kdb_bp_t *bp;
+	unsigned long addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
+	kdb_reason_t reason = KDB_REASON_OOPS;
+	kdb_dbtrap_t db_result = KDB_DB_NOBPT;
+	int i;
+
+	if (KDB_STATE(REENTRY)) {
+		reason = KDB_REASON_SWITCH;
+		KDB_STATE_CLEAR(REENTRY);
+		addr = instruction_pointer(ks->linux_regs);
+	}
+	ks->pass_exception = 0;
+	if (atomic_read(&kgdb_setting_breakpoint))
+		reason = KDB_REASON_KEYBOARD;
+
+	for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
+		if ((bp->bp_enabled) && (bp->bp_addr == addr)) {
+			reason = KDB_REASON_BREAK;
+			db_result = KDB_DB_BPT;
+			if (addr != instruction_pointer(ks->linux_regs))
+				kgdb_arch_set_pc(ks->linux_regs, addr);
+			break;
+		}
+	}
+	if (reason == KDB_REASON_BREAK || reason == KDB_REASON_SWITCH) {
+		for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
+			if (bp->bp_free)
+				continue;
+			if (bp->bp_addr == addr) {
+				bp->bp_delay = 1;
+				bp->bp_delayed = 1;
+	/*
+	 * SSBPT is set when the kernel debugger must single step a
+	 * task in order to re-establish an instruction breakpoint
+	 * which uses the instruction replacement mechanism.  It is
+	 * cleared by any action that removes the need to single-step
+	 * the breakpoint.
+	 */
+				reason = KDB_REASON_BREAK;
+				db_result = KDB_DB_BPT;
+				KDB_STATE_SET(SSBPT);
+				break;
+			}
+		}
+	}
+
+	if (reason != KDB_REASON_BREAK && ks->ex_vector == 0 &&
+		ks->signo == SIGTRAP) {
+		reason = KDB_REASON_SSTEP;
+		db_result = KDB_DB_BPT;
+	}
+	/* Set initial kdb state variables */
+	KDB_STATE_CLEAR(KGDB_TRANS);
+	kdb_initial_cpu = ks->cpu;
+	kdb_current_task = kgdb_info[ks->cpu].task;
+	kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
+	/* Remove any breakpoints as needed by kdb and clear single step */
+	kdb_bp_remove();
+	KDB_STATE_CLEAR(DOING_SS);
+	KDB_STATE_CLEAR(DOING_SSB);
+	KDB_STATE_SET(PAGER);
+	/* zero out any offline cpu data */
+	for_each_present_cpu(i) {
+		if (!cpu_online(i)) {
+			kgdb_info[i].debuggerinfo = NULL;
+			kgdb_info[i].task = NULL;
+		}
+	}
+	if (ks->err_code == DIE_OOPS || reason == KDB_REASON_OOPS) {
+		ks->pass_exception = 1;
+		KDB_FLAG_SET(CATASTROPHIC);
+	}
+	kdb_initial_cpu = ks->cpu;
+	if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
+		KDB_STATE_CLEAR(SSBPT);
+		KDB_STATE_CLEAR(DOING_SS);
+	} else {
+		/* Start kdb main loop */
+		error = kdb_main_loop(KDB_REASON_ENTER, reason,
+				      ks->err_code, db_result, ks->linux_regs);
+	}
+	/*
+	 * Upon exit from the kdb main loop setup break points and restart
+	 * the system based on the requested continue state
+	 */
+	kdb_initial_cpu = -1;
+	kdb_current_task = NULL;
+	kdb_current_regs = NULL;
+	KDB_STATE_CLEAR(PAGER);
+	kdbnearsym_cleanup();
+	if (error == KDB_CMD_KGDB) {
+		if (KDB_STATE(DOING_KGDB) || KDB_STATE(DOING_KGDB2)) {
+	/*
+	 * This is the interface glue which allows kdb to transition
+	 * into the gdb stub.  In order to do this the '?' or '' gdb
+	 * serial packet response is processed here and then control
+	 * is passed to the gdbstub.
+	 */
+			if (KDB_STATE(DOING_KGDB))
+				gdbstub_state(ks, "?");
+			else
+				gdbstub_state(ks, "");
+			KDB_STATE_CLEAR(DOING_KGDB);
+			KDB_STATE_CLEAR(DOING_KGDB2);
+		}
+		return DBG_PASS_EVENT;
+	}
+	kdb_bp_install(ks->linux_regs);
+	dbg_activate_sw_breakpoints();
+	/* Set the exit state to a single step or a continue */
+	if (KDB_STATE(DOING_SS))
+		gdbstub_state(ks, "s");
+	else
+		gdbstub_state(ks, "c");
+
+	KDB_FLAG_CLEAR(CATASTROPHIC);
+
+	/* Invoke arch specific exception handling prior to system resume */
+	kgdb_info[ks->cpu].ret_state = gdbstub_state(ks, "e");
+	if (ks->pass_exception)
+		kgdb_info[ks->cpu].ret_state = 1;
+	if (error == KDB_CMD_CPU) {
+		KDB_STATE_SET(REENTRY);
+		/*
+		 * Force clear the single step bit because kdb emulates this
+		 * differently vs the gdbstub
+		 */
+		kgdb_single_step = 0;
+		dbg_deactivate_sw_breakpoints();
+		return DBG_SWITCH_CPU_EVENT;
+	}
+	return kgdb_info[ks->cpu].ret_state;
+}
+
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
new file mode 100644
index 0000000..c9b7f4f9
--- /dev/null
+++ b/kernel/debug/kdb/kdb_io.c
@@ -0,0 +1,826 @@
+/*
+ * Kernel Debugger Architecture Independent Console I/O handler
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2006 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kdev_t.h>
+#include <linux/console.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/kallsyms.h>
+#include "kdb_private.h"
+
+#define CMD_BUFLEN 256
+char kdb_prompt_str[CMD_BUFLEN];
+
+int kdb_trap_printk;
+
+static void kgdb_transition_check(char *buffer)
+{
+	int slen = strlen(buffer);
+	if (strncmp(buffer, "$?#3f", slen) != 0 &&
+	    strncmp(buffer, "$qSupported#37", slen) != 0 &&
+	    strncmp(buffer, "+$qSupported#37", slen) != 0) {
+		KDB_STATE_SET(KGDB_TRANS);
+		kdb_printf("%s", buffer);
+	}
+}
+
+static int kdb_read_get_key(char *buffer, size_t bufsize)
+{
+#define ESCAPE_UDELAY 1000
+#define ESCAPE_DELAY (2*1000000/ESCAPE_UDELAY) /* 2 seconds worth of udelays */
+	char escape_data[5];	/* longest vt100 escape sequence is 4 bytes */
+	char *ped = escape_data;
+	int escape_delay = 0;
+	get_char_func *f, *f_escape = NULL;
+	int key;
+
+	for (f = &kdb_poll_funcs[0]; ; ++f) {
+		if (*f == NULL) {
+			/* Reset NMI watchdog once per poll loop */
+			touch_nmi_watchdog();
+			f = &kdb_poll_funcs[0];
+		}
+		if (escape_delay == 2) {
+			*ped = '\0';
+			ped = escape_data;
+			--escape_delay;
+		}
+		if (escape_delay == 1) {
+			key = *ped++;
+			if (!*ped)
+				--escape_delay;
+			break;
+		}
+		key = (*f)();
+		if (key == -1) {
+			if (escape_delay) {
+				udelay(ESCAPE_UDELAY);
+				--escape_delay;
+			}
+			continue;
+		}
+		if (bufsize <= 2) {
+			if (key == '\r')
+				key = '\n';
+			*buffer++ = key;
+			*buffer = '\0';
+			return -1;
+		}
+		if (escape_delay == 0 && key == '\e') {
+			escape_delay = ESCAPE_DELAY;
+			ped = escape_data;
+			f_escape = f;
+		}
+		if (escape_delay) {
+			*ped++ = key;
+			if (f_escape != f) {
+				escape_delay = 2;
+				continue;
+			}
+			if (ped - escape_data == 1) {
+				/* \e */
+				continue;
+			} else if (ped - escape_data == 2) {
+				/* \e<something> */
+				if (key != '[')
+					escape_delay = 2;
+				continue;
+			} else if (ped - escape_data == 3) {
+				/* \e[<something> */
+				int mapkey = 0;
+				switch (key) {
+				case 'A': /* \e[A, up arrow */
+					mapkey = 16;
+					break;
+				case 'B': /* \e[B, down arrow */
+					mapkey = 14;
+					break;
+				case 'C': /* \e[C, right arrow */
+					mapkey = 6;
+					break;
+				case 'D': /* \e[D, left arrow */
+					mapkey = 2;
+					break;
+				case '1': /* dropthrough */
+				case '3': /* dropthrough */
+				/* \e[<1,3,4>], may be home, del, end */
+				case '4':
+					mapkey = -1;
+					break;
+				}
+				if (mapkey != -1) {
+					if (mapkey > 0) {
+						escape_data[0] = mapkey;
+						escape_data[1] = '\0';
+					}
+					escape_delay = 2;
+				}
+				continue;
+			} else if (ped - escape_data == 4) {
+				/* \e[<1,3,4><something> */
+				int mapkey = 0;
+				if (key == '~') {
+					switch (escape_data[2]) {
+					case '1': /* \e[1~, home */
+						mapkey = 1;
+						break;
+					case '3': /* \e[3~, del */
+						mapkey = 4;
+						break;
+					case '4': /* \e[4~, end */
+						mapkey = 5;
+						break;
+					}
+				}
+				if (mapkey > 0) {
+					escape_data[0] = mapkey;
+					escape_data[1] = '\0';
+				}
+				escape_delay = 2;
+				continue;
+			}
+		}
+		break;	/* A key to process */
+	}
+	return key;
+}
+
+/*
+ * kdb_read
+ *
+ *	This function reads a string of characters, terminated by
+ *	a newline, or by reaching the end of the supplied buffer,
+ *	from the current kernel debugger console device.
+ * Parameters:
+ *	buffer	- Address of character buffer to receive input characters.
+ *	bufsize - size, in bytes, of the character buffer
+ * Returns:
+ *	Returns a pointer to the buffer containing the received
+ *	character string.  This string will be terminated by a
+ *	newline character.
+ * Locking:
+ *	No locks are required to be held upon entry to this
+ *	function.  It is not reentrant - it relies on the fact
+ *	that kdb runs on only one "master debug" cpu at a time.
+ * Remarks:
+ *
+ * The buffer size must be >= 2.  A buffer size of 2 means that the caller only
+ * wants a single key.
+ *
+ * An escape key could be the start of a vt100 control sequence such as \e[D
+ * (left arrow) or it could be a character in its own right.  The standard
+ * method for detecting the difference is to wait for 2 seconds to see if there
+ * are any other characters.  kdb is complicated by the lack of a timer service
+ * (interrupts are off), by multiple input sources and by the need to sometimes
+ * return after just one key.  Escape sequence processing has to be done as
+ * states in the polling loop.
+ */
+
+static char *kdb_read(char *buffer, size_t bufsize)
+{
+	char *cp = buffer;
+	char *bufend = buffer+bufsize-2;	/* Reserve space for newline
+						 * and null byte */
+	char *lastchar;
+	char *p_tmp;
+	char tmp;
+	static char tmpbuffer[CMD_BUFLEN];
+	int len = strlen(buffer);
+	int len_tmp;
+	int tab = 0;
+	int count;
+	int i;
+	int diag, dtab_count;
+	int key;
+
+
+	diag = kdbgetintenv("DTABCOUNT", &dtab_count);
+	if (diag)
+		dtab_count = 30;
+
+	if (len > 0) {
+		cp += len;
+		if (*(buffer+len-1) == '\n')
+			cp--;
+	}
+
+	lastchar = cp;
+	*cp = '\0';
+	kdb_printf("%s", buffer);
+poll_again:
+	key = kdb_read_get_key(buffer, bufsize);
+	if (key == -1)
+		return buffer;
+	if (key != 9)
+		tab = 0;
+	switch (key) {
+	case 8: /* backspace */
+		if (cp > buffer) {
+			if (cp < lastchar) {
+				memcpy(tmpbuffer, cp, lastchar - cp);
+				memcpy(cp-1, tmpbuffer, lastchar - cp);
+			}
+			*(--lastchar) = '\0';
+			--cp;
+			kdb_printf("\b%s \r", cp);
+			tmp = *cp;
+			*cp = '\0';
+			kdb_printf(kdb_prompt_str);
+			kdb_printf("%s", buffer);
+			*cp = tmp;
+		}
+		break;
+	case 13: /* enter */
+		*lastchar++ = '\n';
+		*lastchar++ = '\0';
+		kdb_printf("\n");
+		return buffer;
+	case 4: /* Del */
+		if (cp < lastchar) {
+			memcpy(tmpbuffer, cp+1, lastchar - cp - 1);
+			memcpy(cp, tmpbuffer, lastchar - cp - 1);
+			*(--lastchar) = '\0';
+			kdb_printf("%s \r", cp);
+			tmp = *cp;
+			*cp = '\0';
+			kdb_printf(kdb_prompt_str);
+			kdb_printf("%s", buffer);
+			*cp = tmp;
+		}
+		break;
+	case 1: /* Home */
+		if (cp > buffer) {
+			kdb_printf("\r");
+			kdb_printf(kdb_prompt_str);
+			cp = buffer;
+		}
+		break;
+	case 5: /* End */
+		if (cp < lastchar) {
+			kdb_printf("%s", cp);
+			cp = lastchar;
+		}
+		break;
+	case 2: /* Left */
+		if (cp > buffer) {
+			kdb_printf("\b");
+			--cp;
+		}
+		break;
+	case 14: /* Down */
+		memset(tmpbuffer, ' ',
+		       strlen(kdb_prompt_str) + (lastchar-buffer));
+		*(tmpbuffer+strlen(kdb_prompt_str) +
+		  (lastchar-buffer)) = '\0';
+		kdb_printf("\r%s\r", tmpbuffer);
+		*lastchar = (char)key;
+		*(lastchar+1) = '\0';
+		return lastchar;
+	case 6: /* Right */
+		if (cp < lastchar) {
+			kdb_printf("%c", *cp);
+			++cp;
+		}
+		break;
+	case 16: /* Up */
+		memset(tmpbuffer, ' ',
+		       strlen(kdb_prompt_str) + (lastchar-buffer));
+		*(tmpbuffer+strlen(kdb_prompt_str) +
+		  (lastchar-buffer)) = '\0';
+		kdb_printf("\r%s\r", tmpbuffer);
+		*lastchar = (char)key;
+		*(lastchar+1) = '\0';
+		return lastchar;
+	case 9: /* Tab */
+		if (tab < 2)
+			++tab;
+		p_tmp = buffer;
+		while (*p_tmp == ' ')
+			p_tmp++;
+		if (p_tmp > cp)
+			break;
+		memcpy(tmpbuffer, p_tmp, cp-p_tmp);
+		*(tmpbuffer + (cp-p_tmp)) = '\0';
+		p_tmp = strrchr(tmpbuffer, ' ');
+		if (p_tmp)
+			++p_tmp;
+		else
+			p_tmp = tmpbuffer;
+		len = strlen(p_tmp);
+		count = kallsyms_symbol_complete(p_tmp,
+						 sizeof(tmpbuffer) -
+						 (p_tmp - tmpbuffer));
+		if (tab == 2 && count > 0) {
+			kdb_printf("\n%d symbols are found.", count);
+			if (count > dtab_count) {
+				count = dtab_count;
+				kdb_printf(" But only first %d symbols will"
+					   " be printed.\nYou can change the"
+					   " environment variable DTABCOUNT.",
+					   count);
+			}
+			kdb_printf("\n");
+			for (i = 0; i < count; i++) {
+				if (kallsyms_symbol_next(p_tmp, i) < 0)
+					break;
+				kdb_printf("%s ", p_tmp);
+				*(p_tmp + len) = '\0';
+			}
+			if (i >= dtab_count)
+				kdb_printf("...");
+			kdb_printf("\n");
+			kdb_printf(kdb_prompt_str);
+			kdb_printf("%s", buffer);
+		} else if (tab != 2 && count > 0) {
+			len_tmp = strlen(p_tmp);
+			strncpy(p_tmp+len_tmp, cp, lastchar-cp+1);
+			len_tmp = strlen(p_tmp);
+			strncpy(cp, p_tmp+len, len_tmp-len + 1);
+			len = len_tmp - len;
+			kdb_printf("%s", cp);
+			cp += len;
+			lastchar += len;
+		}
+		kdb_nextline = 1; /* reset output line number */
+		break;
+	default:
+		if (key >= 32 && lastchar < bufend) {
+			if (cp < lastchar) {
+				memcpy(tmpbuffer, cp, lastchar - cp);
+				memcpy(cp+1, tmpbuffer, lastchar - cp);
+				*++lastchar = '\0';
+				*cp = key;
+				kdb_printf("%s\r", cp);
+				++cp;
+				tmp = *cp;
+				*cp = '\0';
+				kdb_printf(kdb_prompt_str);
+				kdb_printf("%s", buffer);
+				*cp = tmp;
+			} else {
+				*++lastchar = '\0';
+				*cp++ = key;
+				/* The kgdb transition check will hide
+				 * printed characters if we think that
+				 * kgdb is connecting, until the check
+				 * fails */
+				if (!KDB_STATE(KGDB_TRANS))
+					kgdb_transition_check(buffer);
+				else
+					kdb_printf("%c", key);
+			}
+			/* Special escape to kgdb */
+			if (lastchar - buffer >= 5 &&
+			    strcmp(lastchar - 5, "$?#3f") == 0) {
+				strcpy(buffer, "kgdb");
+				KDB_STATE_SET(DOING_KGDB);
+				return buffer;
+			}
+			if (lastchar - buffer >= 14 &&
+			    strcmp(lastchar - 14, "$qSupported#37") == 0) {
+				strcpy(buffer, "kgdb");
+				KDB_STATE_SET(DOING_KGDB2);
+				return buffer;
+			}
+		}
+		break;
+	}
+	goto poll_again;
+}
+
+/*
+ * kdb_getstr
+ *
+ *	Print the prompt string and read a command from the
+ *	input device.
+ *
+ * Parameters:
+ *	buffer	Address of buffer to receive command
+ *	bufsize Size of buffer in bytes
+ *	prompt	Pointer to string to use as prompt string
+ * Returns:
+ *	Pointer to command buffer.
+ * Locking:
+ *	None.
+ * Remarks:
+ *	For SMP kernels, the processor number will be
+ *	substituted for %d, %x or %o in the prompt.
+ */
+
+char *kdb_getstr(char *buffer, size_t bufsize, char *prompt)
+{
+	if (prompt && kdb_prompt_str != prompt)
+		strncpy(kdb_prompt_str, prompt, CMD_BUFLEN);
+	kdb_printf(kdb_prompt_str);
+	kdb_nextline = 1;	/* Prompt and input resets line number */
+	return kdb_read(buffer, bufsize);
+}
+
+/*
+ * kdb_input_flush
+ *
+ *	Get rid of any buffered console input.
+ *
+ * Parameters:
+ *	none
+ * Returns:
+ *	nothing
+ * Locking:
+ *	none
+ * Remarks:
+ *	Call this function whenever you want to flush input.  If there is any
+ *	outstanding input, it ignores all characters until there has been no
+ *	data for approximately 1ms.
+ */
+
+static void kdb_input_flush(void)
+{
+	get_char_func *f;
+	int res;
+	int flush_delay = 1;
+	while (flush_delay) {
+		flush_delay--;
+empty:
+		touch_nmi_watchdog();
+		for (f = &kdb_poll_funcs[0]; *f; ++f) {
+			res = (*f)();
+			if (res != -1) {
+				flush_delay = 1;
+				goto empty;
+			}
+		}
+		if (flush_delay)
+			mdelay(1);
+	}
+}
+
+/*
+ * kdb_printf
+ *
+ *	Print a string to the output device(s).
+ *
+ * Parameters:
+ *	printf-like format and optional args.
+ * Returns:
+ *	The length of the message written, 0 if nothing was printed.
+ * Locking:
+ *	None.
+ * Remarks:
+ *	use 'kdbcons->write()' to avoid polluting 'log_buf' with
+ *	kdb output.
+ *
+ *  If the user is doing 'cmd args | grep srch'
+ *  then kdb_grepping_flag is set.
+ *  In that case we need to accumulate full lines (ending in \n) before
+ *  searching for the pattern.
+ */
+
+static char kdb_buffer[256];	/* A bit too big to go on stack */
+static char *next_avail = kdb_buffer;
+static int  size_avail;
+static int  suspend_grep;
+
+/*
+ * search arg1 to see if it contains arg2
+ * (kdmain.c provides flags for ^pat and pat$)
+ *
+ * return 1 for found, 0 for not found
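+ *
+ * illustrative cases for the pattern "foo" (example strings only):
+ *	kdb_grep_leading  ("^foo"):  "foobar\n" matches, "barfoo\n" does not
+ *	kdb_grep_trailing ("foo$"):  "barfoo\n" matches, "foobar\n" does not
+ *	both set          ("^foo$"): only the line "foo\n" matches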
+ */
+static int kdb_search_string(char *searched, char *searchfor)
+{
+	char firstchar, *cp;
+	int len1, len2;
+
+	/* not counting the newline at the end of "searched" */
+	len1 = strlen(searched)-1;
+	len2 = strlen(searchfor);
+	if (len1 < len2)
+		return 0;
+	if (kdb_grep_leading && kdb_grep_trailing && len1 != len2)
+		return 0;
+	if (kdb_grep_leading) {
+		if (!strncmp(searched, searchfor, len2))
+			return 1;
+	} else if (kdb_grep_trailing) {
+		if (!strncmp(searched+len1-len2, searchfor, len2))
+			return 1;
+	} else {
+		firstchar = *searchfor;
+		cp = searched;
+		while ((cp = strchr(cp, firstchar))) {
+			if (!strncmp(cp, searchfor, len2))
+				return 1;
+			cp++;
+		}
+	}
+	return 0;
+}
+
+int vkdb_printf(const char *fmt, va_list ap)
+{
+	int diag;
+	int linecount;
+	int logging, saved_loglevel = 0;
+	int saved_trap_printk;
+	int got_printf_lock = 0;
+	int retlen = 0;
+	int fnd, len;
+	char *cp, *cp2, *cphold = NULL, replaced_byte = ' ';
+	char *moreprompt = "more> ";
+	struct console *c = console_drivers;
+	static DEFINE_SPINLOCK(kdb_printf_lock);
+	unsigned long uninitialized_var(flags);
+
+	preempt_disable();
+	saved_trap_printk = kdb_trap_printk;
+	kdb_trap_printk = 0;
+
+	/* Serialize kdb_printf if multiple cpus try to write at once.
+	 * But if any cpu goes recursive in kdb, just print the output,
+	 * even if it is interleaved with any other text.
+	 */
+	if (!KDB_STATE(PRINTF_LOCK)) {
+		KDB_STATE_SET(PRINTF_LOCK);
+		spin_lock_irqsave(&kdb_printf_lock, flags);
+		got_printf_lock = 1;
+		atomic_inc(&kdb_event);
+	} else {
+		__acquire(kdb_printf_lock);
+	}
+
+	diag = kdbgetintenv("LINES", &linecount);
+	if (diag || linecount <= 1)
+		linecount = 24;
+
+	diag = kdbgetintenv("LOGGING", &logging);
+	if (diag)
+		logging = 0;
+
+	if (!kdb_grepping_flag || suspend_grep) {
+		/* normally, every vsnprintf starts a new buffer */
+		next_avail = kdb_buffer;
+		size_avail = sizeof(kdb_buffer);
+	}
+	vsnprintf(next_avail, size_avail, fmt, ap);
+
+	/*
+	 * If kdb_parse() found that the command was cmd xxx | grep yyy
+	 * then kdb_grepping_flag is set, and kdb_grep_string contains yyy
+	 *
+	 * Accumulate the print data up to a newline before searching it.
+	 * (vsnprintf does null-terminate the string that it generates)
+	 */
+
+	/* skip the search if prints are temporarily unconditional */
+	if (!suspend_grep && kdb_grepping_flag) {
+		cp = strchr(kdb_buffer, '\n');
+		if (!cp) {
+			/*
+			 * Special cases that don't end with newlines
+			 * but should be written without one:
+			 *   The "[nn]kdb> " prompt should
+			 *   appear at the front of the buffer.
+			 *
+			 *   The "[nn]more " prompt (MOREPROMPT ->
+			 *   moreprompt) should also be written without
+			 *   one, but we print that ourselves and set
+			 *   the suspend_grep flag to make it
+			 *   unconditional.
+			 *
+			 */
+			if (next_avail == kdb_buffer) {
+				/*
+				 * these should occur after a newline,
+				 * so they will be at the front of the
+				 * buffer
+				 */
+				cp2 = kdb_buffer;
+				len = strlen(kdb_prompt_str);
+				if (!strncmp(cp2, kdb_prompt_str, len)) {
+					/*
+					 * We're about to start a new
+					 * command, so we can go back
+					 * to normal mode.
+					 */
+					kdb_grepping_flag = 0;
+					goto kdb_printit;
+				}
+			}
+			/* no newline; don't search/write the buffer
+			   until one is there */
+			len = strlen(kdb_buffer);
+			next_avail = kdb_buffer + len;
+			size_avail = sizeof(kdb_buffer) - len;
+			goto kdb_print_out;
+		}
+
+		/*
+		 * The newline is present; print through it or discard
+		 * it, depending on the results of the search.
+		 */
+		cp++;	 	     /* to byte after the newline */
+		replaced_byte = *cp; /* remember what/where it was */
+		cphold = cp;
+		*cp = '\0';	     /* end the string for our search */
+
+		/*
+		 * We now have a newline at the end of the string
+		 * Only continue with this output if it contains the
+		 * search string.
+		 */
+		fnd = kdb_search_string(kdb_buffer, kdb_grep_string);
+		if (!fnd) {
+			/*
+			 * At this point the complete line at the start
+			 * of kdb_buffer can be discarded, as it does
+			 * not contain what the user is looking for.
+			 * Shift the buffer left.
+			 */
+			*cphold = replaced_byte;
+			strcpy(kdb_buffer, cphold);
+			len = strlen(kdb_buffer);
+			next_avail = kdb_buffer + len;
+			size_avail = sizeof(kdb_buffer) - len;
+			goto kdb_print_out;
+		}
+		/*
+		 * at this point the string is a full line and
+		 * should be printed, up to the null.
+		 */
+	}
+kdb_printit:
+
+	/*
+	 * Write to all consoles.
+	 */
+	retlen = strlen(kdb_buffer);
+	if (!dbg_kdb_mode && kgdb_connected) {
+		gdbstub_msg_write(kdb_buffer, retlen);
+	} else {
+		if (!dbg_io_ops->is_console) {
+			len = strlen(kdb_buffer);
+			cp = kdb_buffer;
+			while (len--) {
+				dbg_io_ops->write_char(*cp);
+				cp++;
+			}
+		}
+		while (c) {
+			c->write(c, kdb_buffer, retlen);
+			touch_nmi_watchdog();
+			c = c->next;
+		}
+	}
+	if (logging) {
+		saved_loglevel = console_loglevel;
+		console_loglevel = 0;
+		printk(KERN_INFO "%s", kdb_buffer);
+	}
+
+	if (KDB_STATE(PAGER) && strchr(kdb_buffer, '\n'))
+		kdb_nextline++;
+
+	/* check for having reached the LINES number of printed lines */
+	if (kdb_nextline == linecount) {
+		char buf1[16] = "";
+#if defined(CONFIG_SMP)
+		char buf2[32];
+#endif
+
+		/* Watch out for recursion here.  Any routine that calls
+		 * kdb_printf will come back through here.  And kdb_read
+		 * uses kdb_printf to echo on serial consoles ...
+		 */
+		kdb_nextline = 1;	/* In case of recursion */
+
+		/*
+		 * Pause until cr.
+		 */
+		moreprompt = kdbgetenv("MOREPROMPT");
+		if (moreprompt == NULL)
+			moreprompt = "more> ";
+
+#if defined(CONFIG_SMP)
+		if (strchr(moreprompt, '%')) {
+			sprintf(buf2, moreprompt, get_cpu());
+			put_cpu();
+			moreprompt = buf2;
+		}
+#endif
+
+		kdb_input_flush();
+		c = console_drivers;
+
+		if (!dbg_io_ops->is_console) {
+			len = strlen(moreprompt);
+			cp = moreprompt;
+			while (len--) {
+				dbg_io_ops->write_char(*cp);
+				cp++;
+			}
+		}
+		while (c) {
+			c->write(c, moreprompt, strlen(moreprompt));
+			touch_nmi_watchdog();
+			c = c->next;
+		}
+
+		if (logging)
+			printk("%s", moreprompt);
+
+		kdb_read(buf1, 2); /* '2' indicates to return
+				    * immediately after getting one key. */
+		kdb_nextline = 1;	/* Really set output line 1 */
+
+		/* empty and reset the buffer: */
+		kdb_buffer[0] = '\0';
+		next_avail = kdb_buffer;
+		size_avail = sizeof(kdb_buffer);
+		if ((buf1[0] == 'q') || (buf1[0] == 'Q')) {
+			/* user hit q or Q */
+			KDB_FLAG_SET(CMD_INTERRUPT); /* command interrupted */
+			KDB_STATE_CLEAR(PAGER);
+			/* end of command output; back to normal mode */
+			kdb_grepping_flag = 0;
+			kdb_printf("\n");
+		} else if (buf1[0] == ' ') {
+			kdb_printf("\n");
+			suspend_grep = 1; /* for this recursion */
+		} else if (buf1[0] == '\n') {
+			kdb_nextline = linecount - 1;
+			kdb_printf("\r");
+			suspend_grep = 1; /* for this recursion */
+		} else if (buf1[0] && buf1[0] != '\n') {
+			/* user hit something other than enter */
+			suspend_grep = 1; /* for this recursion */
+			kdb_printf("\nOnly 'q' or 'Q' are processed at more "
+				   "prompt, input ignored\n");
+		} else if (kdb_grepping_flag) {
+			/* user hit enter */
+			suspend_grep = 1; /* for this recursion */
+			kdb_printf("\n");
+		}
+		kdb_input_flush();
+	}
+
+	/*
+	 * For grep searches, shift the printed string left.
+	 *  replaced_byte contains the character that was overwritten with
+	 *  the terminating null, and cphold points to the null.
+	 * Then adjust the notion of available space in the buffer.
+	 */
+	if (kdb_grepping_flag && !suspend_grep) {
+		*cphold = replaced_byte;
+		strcpy(kdb_buffer, cphold);
+		len = strlen(kdb_buffer);
+		next_avail = kdb_buffer + len;
+		size_avail = sizeof(kdb_buffer) - len;
+	}
+
+kdb_print_out:
+	suspend_grep = 0; /* end of what may have been a recursive call */
+	if (logging)
+		console_loglevel = saved_loglevel;
+	if (KDB_STATE(PRINTF_LOCK) && got_printf_lock) {
+		got_printf_lock = 0;
+		spin_unlock_irqrestore(&kdb_printf_lock, flags);
+		KDB_STATE_CLEAR(PRINTF_LOCK);
+		atomic_dec(&kdb_event);
+	} else {
+		__release(kdb_printf_lock);
+	}
+	kdb_trap_printk = saved_trap_printk;
+	preempt_enable();
+	return retlen;
+}
+
+int kdb_printf(const char *fmt, ...)
+{
+	va_list ap;
+	int r;
+
+	va_start(ap, fmt);
+	r = vkdb_printf(fmt, ap);
+	va_end(ap);
+
+	return r;
+}
+
diff --git a/kernel/debug/kdb/kdb_keyboard.c b/kernel/debug/kdb/kdb_keyboard.c
new file mode 100644
index 0000000..4bca634
--- /dev/null
+++ b/kernel/debug/kdb/kdb_keyboard.c
@@ -0,0 +1,212 @@
+/*
+ * Kernel Debugger Architecture Dependent Console I/O handler
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.
+ *
+ * Copyright (c) 1999-2006 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ */
+
+#include <linux/kdb.h>
+#include <linux/keyboard.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/io.h>
+
+/* Keyboard Controller Registers on normal PCs. */
+
+#define KBD_STATUS_REG		0x64	/* Status register (R) */
+#define KBD_DATA_REG		0x60	/* Keyboard data register (R/W) */
+
+/* Status Register Bits */
+
+#define KBD_STAT_OBF 		0x01	/* Keyboard output buffer full */
+#define KBD_STAT_MOUSE_OBF	0x20	/* Mouse output buffer full */
+
+static int kbd_exists;
+
+/*
+ * Check if the keyboard controller has a keypress for us.
+ * Some parts (Enter Release, LED change) are still polled here in a
+ * blocking fashion, but hopefully those waits are all short.
+ */
+int kdb_get_kbd_char(void)
+{
+	int scancode, scanstatus;
+	static int shift_lock;	/* CAPS LOCK state (0-off, 1-on) */
+	static int shift_key;	/* Shift next keypress */
+	static int ctrl_key;
+	u_short keychar;
+
+	if (KDB_FLAG(NO_I8042) || KDB_FLAG(NO_VT_CONSOLE) ||
+	    (inb(KBD_STATUS_REG) == 0xff && inb(KBD_DATA_REG) == 0xff)) {
+		kbd_exists = 0;
+		return -1;
+	}
+	kbd_exists = 1;
+
+	if ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0)
+		return -1;
+
+	/*
+	 * Fetch the scancode
+	 */
+	scancode = inb(KBD_DATA_REG);
+	scanstatus = inb(KBD_STATUS_REG);
+
+	/*
+	 * Ignore mouse events.
+	 */
+	if (scanstatus & KBD_STAT_MOUSE_OBF)
+		return -1;
+
+	/*
+	 * Ignore release, trigger on make
+	 * (except for shift keys, where we want to
+	 *  keep the shift state so long as the key is
+	 *  held down).
+	 */
+
+	if (((scancode&0x7f) == 0x2a) || ((scancode&0x7f) == 0x36)) {
+		/*
+		 * Next key may use shift table
+		 */
+		if ((scancode & 0x80) == 0)
+			shift_key = 1;
+		else
+			shift_key = 0;
+		return -1;
+	}
+
+	if ((scancode&0x7f) == 0x1d) {
+		/*
+		 * Left ctrl key
+		 */
+		if ((scancode & 0x80) == 0)
+			ctrl_key = 1;
+		else
+			ctrl_key = 0;
+		return -1;
+	}
+
+	if ((scancode & 0x80) != 0)
+		return -1;
+
+	scancode &= 0x7f;
+
+	/*
+	 * Translate scancode
+	 */
+
+	if (scancode == 0x3a) {
+		/*
+		 * Toggle caps lock
+		 */
+		shift_lock ^= 1;
+
+#ifdef	KDB_BLINK_LED
+		kdb_toggleled(0x4);
+#endif
+		return -1;
+	}
+
+	if (scancode == 0x0e) {
+		/*
+		 * Backspace
+		 */
+		return 8;
+	}
+
+	/* Special Key */
+	switch (scancode) {
+	case 0xF: /* Tab */
+		return 9;
+	case 0x53: /* Del */
+		return 4;
+	case 0x47: /* Home */
+		return 1;
+	case 0x4F: /* End */
+		return 5;
+	case 0x4B: /* Left */
+		return 2;
+	case 0x48: /* Up */
+		return 16;
+	case 0x50: /* Down */
+		return 14;
+	case 0x4D: /* Right */
+		return 6;
+	}
+
+	if (scancode == 0xe0)
+		return -1;
+
+	/*
+	 * For Japanese 86/106 keyboards
+	 * 	See comment in drivers/char/pc_keyb.c.
+	 * 	- Masahiro Adegawa
+	 */
+	if (scancode == 0x73)
+		scancode = 0x59;
+	else if (scancode == 0x7d)
+		scancode = 0x7c;
+
+	if (!shift_lock && !shift_key && !ctrl_key) {
+		keychar = plain_map[scancode];
+	} else if ((shift_lock || shift_key) && key_maps[1]) {
+		keychar = key_maps[1][scancode];
+	} else if (ctrl_key && key_maps[4]) {
+		keychar = key_maps[4][scancode];
+	} else {
+		keychar = 0x0020;
+		kdb_printf("Unknown state/scancode (%d)\n", scancode);
+	}
+	keychar &= 0x0fff;
+	if (keychar == '\t')
+		keychar = ' ';
+	switch (KTYP(keychar)) {
+	case KT_LETTER:
+	case KT_LATIN:
+		if (isprint(keychar))
+			break;		/* printable characters */
+		/* drop through */
+	case KT_SPEC:
+		if (keychar == K_ENTER)
+			break;
+		/* drop through */
+	default:
+		return -1;	/* ignore unprintables */
+	}
+
+	if ((scancode & 0x7f) == 0x1c) {
+		/*
+		 * enter key.  All done.  Absorb the release scancode.
+		 */
+		while ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0)
+			;
+
+		/*
+		 * Fetch the scancode
+		 */
+		scancode = inb(KBD_DATA_REG);
+		scanstatus = inb(KBD_STATUS_REG);
+
+		while (scanstatus & KBD_STAT_MOUSE_OBF) {
+			scancode = inb(KBD_DATA_REG);
+			scanstatus = inb(KBD_STATUS_REG);
+		}
+
+		if (scancode != 0x9c) {
+			/*
+			 * Wasn't an enter-release,  why not?
+			 */
+			kdb_printf("kdb: expected enter got 0x%x status 0x%x\n",
+			       scancode, scanstatus);
+		}
+
+		return 13;
+	}
+
+	return keychar & 0xff;
+}
+EXPORT_SYMBOL_GPL(kdb_get_kbd_char);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
new file mode 100644
index 0000000..b724c79
--- /dev/null
+++ b/kernel/debug/kdb/kdb_main.c
@@ -0,0 +1,2849 @@
+/*
+ * Kernel Debugger Architecture Independent Main Code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
+ * Xscale (R) modifications copyright (C) 2003 Intel Corporation.
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ */
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/sysrq.h>
+#include <linux/smp.h>
+#include <linux/utsname.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/kallsyms.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/nmi.h>
+#include <linux/time.h>
+#include <linux/ptrace.h>
+#include <linux/sysctl.h>
+#include <linux/cpu.h>
+#include <linux/kdebug.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include "kdb_private.h"
+
+#define GREP_LEN 256
+char kdb_grep_string[GREP_LEN];
+int kdb_grepping_flag;
+EXPORT_SYMBOL(kdb_grepping_flag);
+int kdb_grep_leading;
+int kdb_grep_trailing;
+
+/*
+ * Kernel debugger state flags
+ */
+int kdb_flags;
+atomic_t kdb_event;
+
+/*
+ * kdb_lock protects updates to kdb_initial_cpu.  Used to
+ * single thread processors through the kernel debugger.
+ */
+int kdb_initial_cpu = -1;	/* cpu number that owns kdb */
+int kdb_nextline = 1;
+int kdb_state;			/* General KDB state */
+
+struct task_struct *kdb_current_task;
+EXPORT_SYMBOL(kdb_current_task);
+struct pt_regs *kdb_current_regs;
+
+const char *kdb_diemsg;
+static int kdb_go_count;
+#ifdef CONFIG_KDB_CONTINUE_CATASTROPHIC
+static unsigned int kdb_continue_catastrophic =
+	CONFIG_KDB_CONTINUE_CATASTROPHIC;
+#else
+static unsigned int kdb_continue_catastrophic;
+#endif
+
+/* kdb_commands describes the available commands. */
+static kdbtab_t *kdb_commands;
+#define KDB_BASE_CMD_MAX 50
+static int kdb_max_commands = KDB_BASE_CMD_MAX;
+static kdbtab_t kdb_base_commands[50];
+#define for_each_kdbcmd(cmd, num)					\
+	for ((cmd) = kdb_base_commands, (num) = 0;			\
+	     num < kdb_max_commands;					\
+	     num == KDB_BASE_CMD_MAX ? cmd = kdb_commands : cmd++, num++)
+
+typedef struct _kdbmsg {
+	int	km_diag;	/* kdb diagnostic */
+	char	*km_msg;	/* Corresponding message text */
+} kdbmsg_t;
+
+#define KDBMSG(msgnum, text) \
+	{ KDB_##msgnum, text }
+
+static kdbmsg_t kdbmsgs[] = {
+	KDBMSG(NOTFOUND, "Command Not Found"),
+	KDBMSG(ARGCOUNT, "Improper argument count, see usage."),
+	KDBMSG(BADWIDTH, "Illegal value for BYTESPERWORD use 1, 2, 4 or 8, "
+	       "8 is only allowed on 64 bit systems"),
+	KDBMSG(BADRADIX, "Illegal value for RADIX use 8, 10 or 16"),
+	KDBMSG(NOTENV, "Cannot find environment variable"),
+	KDBMSG(NOENVVALUE, "Environment variable should have value"),
+	KDBMSG(NOTIMP, "Command not implemented"),
+	KDBMSG(ENVFULL, "Environment full"),
+	KDBMSG(ENVBUFFULL, "Environment buffer full"),
+	KDBMSG(TOOMANYBPT, "Too many breakpoints defined"),
+#ifdef CONFIG_CPU_XSCALE
+	KDBMSG(TOOMANYDBREGS, "More breakpoints than ibcr registers defined"),
+#else
+	KDBMSG(TOOMANYDBREGS, "More breakpoints than db registers defined"),
+#endif
+	KDBMSG(DUPBPT, "Duplicate breakpoint address"),
+	KDBMSG(BPTNOTFOUND, "Breakpoint not found"),
+	KDBMSG(BADMODE, "Invalid IDMODE"),
+	KDBMSG(BADINT, "Illegal numeric value"),
+	KDBMSG(INVADDRFMT, "Invalid symbolic address format"),
+	KDBMSG(BADREG, "Invalid register name"),
+	KDBMSG(BADCPUNUM, "Invalid cpu number"),
+	KDBMSG(BADLENGTH, "Invalid length field"),
+	KDBMSG(NOBP, "No Breakpoint exists"),
+	KDBMSG(BADADDR, "Invalid address"),
+};
+#undef KDBMSG
+
+static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t);
+
+
+/*
+ * Initial environment.   This is all kept static and local to
+ * this file.   We don't want to rely on the memory allocation
+ * mechanisms in the kernel, so we use a very limited allocate-only
+ * heap for new and altered environment variables.  The entire
+ * environment is limited to a fixed number of entries (add more
+ * to __env[] if required) and a fixed amount of heap (add more to
+ * KDB_ENVBUFSIZE if required).
+ */
+
+static char *__env[] = {
+#if defined(CONFIG_SMP)
+ "PROMPT=[%d]kdb> ",
+ "MOREPROMPT=[%d]more> ",
+#else
+ "PROMPT=kdb> ",
+ "MOREPROMPT=more> ",
+#endif
+ "RADIX=16",
+ "MDCOUNT=8",			/* lines of md output */
+ "BTARGS=9",			/* 9 possible args in bt */
+ KDB_PLATFORM_ENV,
+ "DTABCOUNT=30",
+ "NOSECT=1",
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+};
+
+static const int __nenv = (sizeof(__env) / sizeof(char *));
+
+struct task_struct *kdb_curr_task(int cpu)
+{
+	struct task_struct *p = curr_task(cpu);
+#ifdef	_TIF_MCA_INIT
+	if ((task_thread_info(p)->flags & _TIF_MCA_INIT) && KDB_TSK(cpu))
+		p = krp->p;
+#endif
+	return p;
+}
+
+/*
+ * kdbgetenv - This function will return the character string value of
+ *	an environment variable.
+ * Parameters:
+ *	match	A character string representing an environment variable.
+ * Returns:
+ *	NULL	No environment variable matches 'match'
+ *	char*	Pointer to string value of environment variable.
+ */
+char *kdbgetenv(const char *match)
+{
+	char **ep = __env;
+	int matchlen = strlen(match);
+	int i;
+
+	for (i = 0; i < __nenv; i++) {
+		char *e = *ep++;
+
+		if (!e)
+			continue;
+
+		if ((strncmp(match, e, matchlen) == 0)
+		 && ((e[matchlen] == '\0')
+		   || (e[matchlen] == '='))) {
+			char *cp = strchr(e, '=');
+			return cp ? ++cp : "";
+		}
+	}
+	return NULL;
+}
+
+/*
+ * kdballocenv - This function is used to allocate bytes for
+ *	environment entries.
+ * Parameters:
+ *	bytes	The number of bytes to allocate
+ * Returns:
+ *	A pointer to the allocated space within the static environment
+ *	buffer, or NULL if the request cannot be satisfied.
+ * Remarks:
+ *	We use a static environment buffer (envbuffer) to hold the values
+ *	of dynamically generated environment variables (see kdb_set).  Buffer
+ *	space once allocated is never free'd, so over time, the amount of space
+ *	(currently 512 bytes) will be exhausted if env variables are changed
+ *	frequently.
+ */
+static char *kdballocenv(size_t bytes)
+{
+#define	KDB_ENVBUFSIZE	512
+	static char envbuffer[KDB_ENVBUFSIZE];
+	static int envbufsize;
+	char *ep = NULL;
+
+	if ((KDB_ENVBUFSIZE - envbufsize) >= bytes) {
+		ep = &envbuffer[envbufsize];
+		envbufsize += bytes;
+	}
+	return ep;
+}
+
+/*
+ * kdbgetulenv - This function will return the value of an unsigned
+ *	long-valued environment variable.
+ * Parameters:
+ *	match	A character string representing a numeric value
+ * Outputs:
+ *	*value  the unsigned long representation of the env variable 'match'
+ * Returns:
+ *	Zero on success, a kdb diagnostic on failure.
+ */
+static int kdbgetulenv(const char *match, unsigned long *value)
+{
+	char *ep;
+
+	ep = kdbgetenv(match);
+	if (!ep)
+		return KDB_NOTENV;
+	if (strlen(ep) == 0)
+		return KDB_NOENVVALUE;
+
+	*value = simple_strtoul(ep, NULL, 0);
+
+	return 0;
+}
+
+/*
+ * kdbgetintenv - This function will return the value of an
+ *	integer-valued environment variable.
+ * Parameters:
+ *	match	A character string representing an integer-valued env variable
+ * Outputs:
+ *	*value  the integer representation of the environment variable 'match'
+ * Returns:
+ *	Zero on success, a kdb diagnostic on failure.
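+ * Remarks:
+ *	Typical use, mirroring callers such as vkdb_printf() (the fallback
+ *	value here is illustrative):
+ *
+ *		int lines;
+ *		if (kdbgetintenv("LINES", &lines) || lines <= 1)
+ *			lines = 24;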
+ */
+int kdbgetintenv(const char *match, int *value)
+{
+	unsigned long val;
+	int diag;
+
+	diag = kdbgetulenv(match, &val);
+	if (!diag)
+		*value = (int) val;
+	return diag;
+}
+
+/*
+ * kdbgetularg - This function will convert a numeric string into an
+ *	unsigned long value.
+ * Parameters:
+ *	arg	A character string representing a numeric value
+ * Outputs:
+ *	*value  the unsigned long representation of arg.
+ * Returns:
+ *	Zero on success, a kdb diagnostic on failure.
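+ * Remarks:
+ *	Accepts anything simple_strtoul() understands with base 0, e.g.
+ *	"256", "0x100" or "0644", and falls back to base 16 for bare hex
+ *	strings, so (illustrative) kdbgetularg("ff", &val) sets val to 0xff.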
+ */
+int kdbgetularg(const char *arg, unsigned long *value)
+{
+	char *endp;
+	unsigned long val;
+
+	val = simple_strtoul(arg, &endp, 0);
+
+	if (endp == arg) {
+		/*
+		 * Try base 16, for us folks too lazy to type the
+		 * leading 0x...
+		 */
+		val = simple_strtoul(arg, &endp, 16);
+		if (endp == arg)
+			return KDB_BADINT;
+	}
+
+	*value = val;
+
+	return 0;
+}
+
+/*
+ * kdb_set - This function implements the 'set' command.  Alter an
+ *	existing environment variable or create a new one.
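+ *
+ *	Illustrative invocations (the values are examples only):
+ *	    set LINES 50
+ *	    set BTARGS=9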
+ */
+int kdb_set(int argc, const char **argv)
+{
+	int i;
+	char *ep;
+	size_t varlen, vallen;
+
+	/*
+	 * we can be invoked two ways:
+	 *   set var=value    argv[1]="var", argv[2]="value"
+	 *   set var = value  argv[1]="var", argv[2]="=", argv[3]="value"
+	 * - if the latter, shift 'em down.
+	 */
+	if (argc == 3) {
+		argv[2] = argv[3];
+		argc--;
+	}
+
+	if (argc != 2)
+		return KDB_ARGCOUNT;
+
+	/*
+	 * Check for internal variables
+	 */
+	if (strcmp(argv[1], "KDBDEBUG") == 0) {
+		unsigned int debugflags;
+		char *cp;
+
+		debugflags = simple_strtoul(argv[2], &cp, 0);
+		if (cp == argv[2] || debugflags & ~KDB_DEBUG_FLAG_MASK) {
+			kdb_printf("kdb: illegal debug flags '%s'\n",
+				    argv[2]);
+			return 0;
+		}
+		kdb_flags = (kdb_flags &
+			     ~(KDB_DEBUG_FLAG_MASK << KDB_DEBUG_FLAG_SHIFT))
+			| (debugflags << KDB_DEBUG_FLAG_SHIFT);
+
+		return 0;
+	}
+
+	/*
+	 * Tokenizer squashed the '=' sign.  argv[1] is variable
+	 * name, argv[2] = value.
+	 */
+	varlen = strlen(argv[1]);
+	vallen = strlen(argv[2]);
+	ep = kdballocenv(varlen + vallen + 2);
+	if (ep == (char *)0)
+		return KDB_ENVBUFFULL;
+
+	sprintf(ep, "%s=%s", argv[1], argv[2]);
+
+	ep[varlen+vallen+1] = '\0';
+
+	for (i = 0; i < __nenv; i++) {
+		if (__env[i]
+		 && ((strncmp(__env[i], argv[1], varlen) == 0)
+		   && ((__env[i][varlen] == '\0')
+		    || (__env[i][varlen] == '=')))) {
+			__env[i] = ep;
+			return 0;
+		}
+	}
+
+	/*
+	 * Not an existing variable; use the first free slot.
+	 */
+	for (i = 0; i < __nenv-1; i++) {
+		if (__env[i] == (char *)0) {
+			__env[i] = ep;
+			return 0;
+		}
+	}
+
+	return KDB_ENVFULL;
+}
+
+static int kdb_check_regs(void)
+{
+	if (!kdb_current_regs) {
+		kdb_printf("No current kdb registers."
+			   "  You may need to select another task\n");
+		return KDB_BADREG;
+	}
+	return 0;
+}
+
+/*
+ * kdbgetaddrarg - This function is responsible for parsing an
+ *	address-expression and returning the value of the expression,
+ *	symbol name, and offset to the caller.
+ *
+ *	The argument may consist of a numeric value (decimal or
+ *	hexadecimal), a symbol name, a register name (preceded by the
+ *	percent sign), an environment variable with a numeric value
+ *	(preceded by a dollar sign) or a simple arithmetic expression
+ *	consisting of a symbol name, +/-, and a numeric constant value
+ *	(offset).
+ * Parameters:
+ *	argc	- count of arguments in argv
+ *	argv	- argument vector
+ *	*nextarg - index to next unparsed argument in argv[]
+ *	regs	- Register state at time of KDB entry
+ * Outputs:
+ *	*value	- receives the value of the address-expression
+ *	*offset - receives the offset specified, if any
+ *	*name   - receives the symbol name, if any
+ *	*nextarg - index to next unparsed argument in argv[]
+ * Returns:
+ *	zero is returned on success, a kdb diagnostic code is
+ *      returned on error.
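+ *
+ *	Examples of accepted address-expressions (the symbol name is
+ *	illustrative only):
+ *	    0xffffffff81000000	plain numeric address
+ *	    schedule		symbol name
+ *	    schedule+0x10	symbol plus offset
+ *	    schedule + 16	symbol, sign and offset as separate words
+ *	    $BTARGS		numeric environment variable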
+ */
+int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
+		  unsigned long *value,  long *offset,
+		  char **name)
+{
+	unsigned long addr;
+	unsigned long off = 0;
+	int positive;
+	int diag;
+	int found = 0;
+	char *symname;
+	char symbol = '\0';
+	char *cp;
+	kdb_symtab_t symtab;
+
+	/*
+	 * Process arguments which follow the following syntax:
+	 *
+	 *  symbol | numeric-address [+/- numeric-offset]
+	 *  %register
+	 *  $environment-variable
+	 */
+
+	if (*nextarg > argc)
+		return KDB_ARGCOUNT;
+
+	symname = (char *)argv[*nextarg];
+
+	/*
+	 * If there is no whitespace between the symbol
+	 * or address and the '+' or '-' symbols, we
+	 * remember the character and replace it with a
+	 * null so the symbol/value can be properly parsed
+	 */
+	cp = strpbrk(symname, "+-");
+	if (cp != NULL) {
+		symbol = *cp;
+		*cp++ = '\0';
+	}
+
+	if (symname[0] == '$') {
+		diag = kdbgetulenv(&symname[1], &addr);
+		if (diag)
+			return diag;
+	} else if (symname[0] == '%') {
+		diag = kdb_check_regs();
+		if (diag)
+			return diag;
+		/* Implement register values with % at a later time as it is
+		 * arch optional.
+		 */
+		return KDB_NOTIMP;
+	} else {
+		found = kdbgetsymval(symname, &symtab);
+		if (found) {
+			addr = symtab.sym_start;
+		} else {
+			diag = kdbgetularg(argv[*nextarg], &addr);
+			if (diag)
+				return diag;
+		}
+	}
+
+	if (!found)
+		found = kdbnearsym(addr, &symtab);
+
+	(*nextarg)++;
+
+	if (name)
+		*name = symname;
+	if (value)
+		*value = addr;
+	if (offset && name && *name)
+		*offset = addr - symtab.sym_start;
+
+	if ((*nextarg > argc)
+	 && (symbol == '\0'))
+		return 0;
+
+	/*
+	 * check for +/- and offset
+	 */
+
+	if (symbol == '\0') {
+		if ((argv[*nextarg][0] != '+')
+		 && (argv[*nextarg][0] != '-')) {
+			/*
+			 * Not our argument.  Return.
+			 */
+			return 0;
+		} else {
+			positive = (argv[*nextarg][0] == '+');
+			(*nextarg)++;
+		}
+	} else
+		positive = (symbol == '+');
+
+	/*
+	 * Now there must be an offset!
+	 */
+	if ((*nextarg > argc)
+	 && (symbol == '\0')) {
+		return KDB_INVADDRFMT;
+	}
+
+	if (!symbol) {
+		cp = (char *)argv[*nextarg];
+		(*nextarg)++;
+	}
+
+	diag = kdbgetularg(cp, &off);
+	if (diag)
+		return diag;
+
+	if (!positive)
+		off = -off;
+
+	if (offset)
+		*offset += off;
+
+	if (value)
+		*value += off;
+
+	return 0;
+}
+
+static void kdb_cmderror(int diag)
+{
+	int i;
+
+	if (diag >= 0) {
+		kdb_printf("no error detected (diagnostic is %d)\n", diag);
+		return;
+	}
+
+	for (i = 0; i < __nkdb_err; i++) {
+		if (kdbmsgs[i].km_diag == diag) {
+			kdb_printf("diag: %d: %s\n", diag, kdbmsgs[i].km_msg);
+			return;
+		}
+	}
+
+	kdb_printf("Unknown diag %d\n", -diag);
+}
+
+/*
+ * kdb_defcmd, kdb_defcmd2 - These functions implement the 'defcmd'
+ *	command which defines one command as a set of other commands,
+ *	terminated by endefcmd.  kdb_defcmd processes the initial
+ *	'defcmd' command, kdb_defcmd2 is invoked from kdb_parse for
+ *	the following commands until 'endefcmd'.
+ * Inputs:
+ *	argc	argument count
+ *	argv	argument vector
+ * Returns:
+ *	zero for success, a kdb diagnostic if error
+ */
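+/*
+ * Example (illustrative; 'diag' is an arbitrary user-chosen name):
+ *   defcmd diag "" "Summary plus process list"
+ *   summary
+ *   ps
+ *   endefcmd
+ * After 'endefcmd' the new 'diag' command replays the recorded lines.
+ */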
+struct defcmd_set {
+	int count;
+	int usable;
+	char *name;
+	char *usage;
+	char *help;
+	char **command;
+};
+static struct defcmd_set *defcmd_set;
+static int defcmd_set_count;
+static int defcmd_in_progress;
+
+/* Forward references */
+static int kdb_exec_defcmd(int argc, const char **argv);
+
+static int kdb_defcmd2(const char *cmdstr, const char *argv0)
+{
+	struct defcmd_set *s = defcmd_set + defcmd_set_count - 1;
+	char **save_command = s->command;
+	if (strcmp(argv0, "endefcmd") == 0) {
+		defcmd_in_progress = 0;
+		if (!s->count)
+			s->usable = 0;
+		if (s->usable)
+			kdb_register(s->name, kdb_exec_defcmd,
+				     s->usage, s->help, 0);
+		return 0;
+	}
+	if (!s->usable)
+		return KDB_NOTIMP;
+	s->command = kmalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB);
+	if (!s->command) {
+		kdb_printf("Could not allocate new kdb_defcmd table for %s\n",
+			   cmdstr);
+		s->usable = 0;
+		return KDB_NOTIMP;
+	}
+	memcpy(s->command, save_command, s->count * sizeof(*(s->command)));
+	s->command[s->count++] = kdb_strdup(cmdstr, GFP_KDB);
+	kfree(save_command);
+	return 0;
+}
+
+static int kdb_defcmd(int argc, const char **argv)
+{
+	struct defcmd_set *save_defcmd_set = defcmd_set, *s;
+	if (defcmd_in_progress) {
+		kdb_printf("kdb: nested defcmd detected, assuming missing "
+			   "endefcmd\n");
+		kdb_defcmd2("endefcmd", "endefcmd");
+	}
+	if (argc == 0) {
+		int i;
+		for (s = defcmd_set; s < defcmd_set + defcmd_set_count; ++s) {
+			kdb_printf("defcmd %s \"%s\" \"%s\"\n", s->name,
+				   s->usage, s->help);
+			for (i = 0; i < s->count; ++i)
+				kdb_printf("%s", s->command[i]);
+			kdb_printf("endefcmd\n");
+		}
+		return 0;
+	}
+	if (argc != 3)
+		return KDB_ARGCOUNT;
+	defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
+			     GFP_KDB);
+	if (!defcmd_set) {
+		kdb_printf("Could not allocate new defcmd_set entry for %s\n",
+			   argv[1]);
+		defcmd_set = save_defcmd_set;
+		return KDB_NOTIMP;
+	}
+	memcpy(defcmd_set, save_defcmd_set,
+	       defcmd_set_count * sizeof(*defcmd_set));
+	kfree(save_defcmd_set);
+	s = defcmd_set + defcmd_set_count;
+	memset(s, 0, sizeof(*s));
+	s->usable = 1;
+	s->name = kdb_strdup(argv[1], GFP_KDB);
+	s->usage = kdb_strdup(argv[2], GFP_KDB);
+	s->help = kdb_strdup(argv[3], GFP_KDB);
+	if (s->usage[0] == '"') {
+		strcpy(s->usage, s->usage+1);
+		s->usage[strlen(s->usage)-1] = '\0';
+	}
+	if (s->help[0] == '"') {
+		strcpy(s->help, s->help+1);
+		s->help[strlen(s->help)-1] = '\0';
+	}
+	++defcmd_set_count;
+	defcmd_in_progress = 1;
+	return 0;
+}
+
+/*
+ * kdb_exec_defcmd - Execute the set of commands associated with this
+ *	defcmd name.
+ * Inputs:
+ *	argc	argument count
+ *	argv	argument vector
+ * Returns:
+ *	zero for success, a kdb diagnostic if error
+ */
+static int kdb_exec_defcmd(int argc, const char **argv)
+{
+	int i, ret;
+	struct defcmd_set *s;
+	if (argc != 0)
+		return KDB_ARGCOUNT;
+	for (s = defcmd_set, i = 0; i < defcmd_set_count; ++i, ++s) {
+		if (strcmp(s->name, argv[0]) == 0)
+			break;
+	}
+	if (i == defcmd_set_count) {
+		kdb_printf("kdb_exec_defcmd: could not find commands for %s\n",
+			   argv[0]);
+		return KDB_NOTIMP;
+	}
+	for (i = 0; i < s->count; ++i) {
+		/* Recursive use of kdb_parse, do not use argv after
+		 * this point */
+		argv = NULL;
+		kdb_printf("[%s]kdb> %s\n", s->name, s->command[i]);
+		ret = kdb_parse(s->command[i]);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/* Command history */
+#define KDB_CMD_HISTORY_COUNT	32
+#define CMD_BUFLEN		200	/* kdb_printf: max printline
+					 * size == 256 */
+static unsigned int cmd_head, cmd_tail;
+static unsigned int cmdptr;
+static char cmd_hist[KDB_CMD_HISTORY_COUNT][CMD_BUFLEN];
+static char cmd_cur[CMD_BUFLEN];
+
+/*
+ * The "str" argument may point to something like  | grep xyz
+ */
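+/*
+ * For example (illustrative), an input line such as
+ *   ps | grep "^init$"
+ * reaches this function with str pointing at the '|'; the optional '^'
+ * and '$' anchor the match to the start and end of an output line.
+ */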
+static void parse_grep(const char *str)
+{
+	int	len;
+	char	*cp = (char *)str, *cp2;
+
+	/* sanity check: we should have been called with the '|' first */
+	if (*cp != '|')
+		return;
+	cp++;
+	while (isspace(*cp))
+		cp++;
+	if (strncmp(cp, "grep ", 5)) {
+		kdb_printf("invalid 'pipe', see grephelp\n");
+		return;
+	}
+	cp += 5;
+	while (isspace(*cp))
+		cp++;
+	cp2 = strchr(cp, '\n');
+	if (cp2)
+		*cp2 = '\0'; /* remove the trailing newline */
+	len = strlen(cp);
+	if (len == 0) {
+		kdb_printf("invalid 'pipe', see grephelp\n");
+		return;
+	}
+	/* now cp points to a nonzero length search string */
+	if (*cp == '"') {
+		/* allow it to be "x y z" by removing the "'s - there must
+		   be two of them */
+		cp++;
+		cp2 = strchr(cp, '"');
+		if (!cp2) {
+			kdb_printf("invalid quoted string, see grephelp\n");
+			return;
+		}
+		*cp2 = '\0'; /* end the string where the 2nd " was */
+	}
+	kdb_grep_leading = 0;
+	if (*cp == '^') {
+		kdb_grep_leading = 1;
+		cp++;
+	}
+	len = strlen(cp);
+	kdb_grep_trailing = 0;
+	if (*(cp+len-1) == '$') {
+		kdb_grep_trailing = 1;
+		*(cp+len-1) = '\0';
+	}
+	len = strlen(cp);
+	if (!len)
+		return;
+	if (len >= GREP_LEN) {
+		kdb_printf("search string too long\n");
+		return;
+	}
+	strcpy(kdb_grep_string, cp);
+	kdb_grepping_flag++;
+	return;
+}
+
+/*
+ * kdb_parse - Parse the command line, search the command table for a
+ *	matching command and invoke the command function.  This
+ *	function may be called recursively; if it is, the second call
+ *	will overwrite argv and cbuf.  It is the caller's
+ *	responsibility to save its argv before recursively calling
+ *	kdb_parse().
+ * Parameters:
+ *      cmdstr	The input command line to be parsed.
+ *	regs	The registers at the time kdb was entered.
+ * Returns:
+ *	Zero for success, a kdb diagnostic if failure.
+ * Remarks:
+ *	Limited to 20 tokens.
+ *
+ *	Real rudimentary tokenization. Basically only whitespace
+ *	is considered a token delimiter (but special consideration
+ *	is taken of the '=' sign as used by the 'set' command).
+ *
+ *	The algorithm used to tokenize the input string relies on
+ *	there being at least one whitespace (or otherwise useless)
+ *	character between tokens as the character immediately following
+ *	the token is altered in-place to a null-byte to terminate the
+ *	token string.
+ */
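+/*
+ * Tokenization example (illustrative): the input "set LINES=50" becomes
+ * argv[] = { "set", "LINES", "50" }; the '=' ends the second token and
+ * is squashed, which is the form kdb_set() expects.
+ */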
+
+#define MAXARGC	20
+
+int kdb_parse(const char *cmdstr)
+{
+	static char *argv[MAXARGC];
+	static int argc;
+	static char cbuf[CMD_BUFLEN+2];
+	char *cp;
+	char *cpp, quoted;
+	kdbtab_t *tp;
+	int i, escaped, ignore_errors = 0, check_grep;
+
+	/*
+	 * First tokenize the command string.
+	 */
+	cp = (char *)cmdstr;
+	kdb_grepping_flag = check_grep = 0;
+
+	if (KDB_FLAG(CMD_INTERRUPT)) {
+		/* Previous command was interrupted; a newline must not
+		 * repeat the command */
+		KDB_FLAG_CLEAR(CMD_INTERRUPT);
+		KDB_STATE_SET(PAGER);
+		argc = 0;	/* no repeat */
+	}
+
+	if (*cp != '\n' && *cp != '\0') {
+		argc = 0;
+		cpp = cbuf;
+		while (*cp) {
+			/* skip whitespace */
+			while (isspace(*cp))
+				cp++;
+			if ((*cp == '\0') || (*cp == '\n') ||
+			    (*cp == '#' && !defcmd_in_progress))
+				break;
+			/* special case: check for | grep pattern */
+			if (*cp == '|') {
+				check_grep++;
+				break;
+			}
+			if (cpp >= cbuf + CMD_BUFLEN) {
+				kdb_printf("kdb_parse: command buffer "
+					   "overflow, command ignored\n%s\n",
+					   cmdstr);
+				return KDB_NOTFOUND;
+			}
+			if (argc >= MAXARGC - 1) {
+				kdb_printf("kdb_parse: too many arguments, "
+					   "command ignored\n%s\n", cmdstr);
+				return KDB_NOTFOUND;
+			}
+			argv[argc++] = cpp;
+			escaped = 0;
+			quoted = '\0';
+			/* Copy to next unquoted and unescaped
+			 * whitespace or '=' */
+			while (*cp && *cp != '\n' &&
+			       (escaped || quoted || !isspace(*cp))) {
+				if (cpp >= cbuf + CMD_BUFLEN)
+					break;
+				if (escaped) {
+					escaped = 0;
+					*cpp++ = *cp++;
+					continue;
+				}
+				if (*cp == '\\') {
+					escaped = 1;
+					++cp;
+					continue;
+				}
+				if (*cp == quoted)
+					quoted = '\0';
+				else if (*cp == '\'' || *cp == '"')
+					quoted = *cp;
+				*cpp = *cp++;
+				if (*cpp == '=' && !quoted)
+					break;
+				++cpp;
+			}
+			*cpp++ = '\0';	/* Squash a ws or '=' character */
+		}
+	}
+	if (!argc)
+		return 0;
+	if (check_grep)
+		parse_grep(cp);
+	if (defcmd_in_progress) {
+		int result = kdb_defcmd2(cmdstr, argv[0]);
+		if (!defcmd_in_progress) {
+			argc = 0;	/* avoid repeat on endefcmd */
+			*(argv[0]) = '\0';
+		}
+		return result;
+	}
+	if (argv[0][0] == '-' && argv[0][1] &&
+	    (argv[0][1] < '0' || argv[0][1] > '9')) {
+		ignore_errors = 1;
+		++argv[0];
+	}
+
+	for_each_kdbcmd(tp, i) {
+		if (tp->cmd_name) {
+			/*
+			 * If this command is allowed to be abbreviated,
+			 * check to see if this is it.
+			 */
+
+			if (tp->cmd_minlen
+			 && (strlen(argv[0]) <= tp->cmd_minlen)) {
+				if (strncmp(argv[0],
+					    tp->cmd_name,
+					    tp->cmd_minlen) == 0) {
+					break;
+				}
+			}
+
+			if (strcmp(argv[0], tp->cmd_name) == 0)
+				break;
+		}
+	}
+
+	/*
+	 * If we don't find a command by this name, see if the first
+	 * few characters of this match any of the known commands.
+	 * e.g., md1c20 should match md.
+	 */
+	if (i == kdb_max_commands) {
+		for_each_kdbcmd(tp, i) {
+			if (tp->cmd_name) {
+				if (strncmp(argv[0],
+					    tp->cmd_name,
+					    strlen(tp->cmd_name)) == 0) {
+					break;
+				}
+			}
+		}
+	}
+
+	if (i < kdb_max_commands) {
+		int result;
+		KDB_STATE_SET(CMD);
+		result = (*tp->cmd_func)(argc-1, (const char **)argv);
+		if (result && ignore_errors && result > KDB_CMD_GO)
+			result = 0;
+		KDB_STATE_CLEAR(CMD);
+		switch (tp->cmd_repeat) {
+		case KDB_REPEAT_NONE:
+			argc = 0;
+			if (argv[0])
+				*(argv[0]) = '\0';
+			break;
+		case KDB_REPEAT_NO_ARGS:
+			argc = 1;
+			if (argv[1])
+				*(argv[1]) = '\0';
+			break;
+		case KDB_REPEAT_WITH_ARGS:
+			break;
+		}
+		return result;
+	}
+
+	/*
+	 * If the input with which we were presented does not
+	 * map to an existing command, attempt to parse it as an
+	 * address argument and display the result.   Useful for
+	 * obtaining the address of a variable, or the nearest symbol
+	 * to an address contained in a register.
+	 */
+	{
+		unsigned long value;
+		char *name = NULL;
+		long offset;
+		int nextarg = 0;
+
+		if (kdbgetaddrarg(0, (const char **)argv, &nextarg,
+				  &value, &offset, &name)) {
+			return KDB_NOTFOUND;
+		}
+
+		kdb_printf("%s = ", argv[0]);
+		kdb_symbol_print(value, NULL, KDB_SP_DEFAULT);
+		kdb_printf("\n");
+		return 0;
+	}
+}
+
+
+static int handle_ctrl_cmd(char *cmd)
+{
+#define CTRL_P	16
+#define CTRL_N	14
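+/* ASCII control codes for Ctrl-P (previous) and Ctrl-N (next) history entry */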
+
+	/* initial situation */
+	if (cmd_head == cmd_tail)
+		return 0;
+	switch (*cmd) {
+	case CTRL_P:
+		if (cmdptr != cmd_tail)
+			cmdptr = (cmdptr-1) % KDB_CMD_HISTORY_COUNT;
+		strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
+		return 1;
+	case CTRL_N:
+		if (cmdptr != cmd_head)
+			cmdptr = (cmdptr+1) % KDB_CMD_HISTORY_COUNT;
+		strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * kdb_reboot - This function implements the 'reboot' command.  Reboot
+ *	the system immediately, or loop forever on failure.
+ */
+static int kdb_reboot(int argc, const char **argv)
+{
+	emergency_restart();
+	kdb_printf("Hmm, kdb_reboot did not reboot, spinning here\n");
+	while (1)
+		cpu_relax();
+	/* NOTREACHED */
+	return 0;
+}
+
+static void kdb_dumpregs(struct pt_regs *regs)
+{
+	int old_lvl = console_loglevel;
+	console_loglevel = 15;
+	kdb_trap_printk++;
+	show_regs(regs);
+	kdb_trap_printk--;
+	kdb_printf("\n");
+	console_loglevel = old_lvl;
+}
+
+void kdb_set_current_task(struct task_struct *p)
+{
+	kdb_current_task = p;
+
+	if (kdb_task_has_cpu(p)) {
+		kdb_current_regs = KDB_TSKREGS(kdb_process_cpu(p));
+		return;
+	}
+	kdb_current_regs = NULL;
+}
+
+/*
+ * kdb_local - The main code for kdb.  This routine is invoked on a
+ *	specific processor; it is not global.  The main kdb() routine
+ *	ensures that only one processor at a time is in this routine.
+ *	This code is called with the real reason code on the first
+ *	entry to a kdb session, thereafter it is called with reason
+ *	SWITCH, even if the user goes back to the original cpu.
+ * Inputs:
+ *	reason		The reason KDB was invoked
+ *	error		The hardware-defined error code
+ *	regs		The exception frame at time of fault/breakpoint.
+ *	db_result	Result code from the break or debug point.
+ * Returns:
+ *	0	KDB was invoked for an event for which it was not responsible.
+ *	1	KDB handled the event for which it was invoked.
+ *	KDB_CMD_GO	User typed 'go'.
+ *	KDB_CMD_CPU	User switched to another cpu.
+ *	KDB_CMD_SS	Single step.
+ *	KDB_CMD_SSB	Single step until branch.
+ */
+static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
+		     kdb_dbtrap_t db_result)
+{
+	char *cmdbuf;
+	int diag;
+	struct task_struct *kdb_current =
+		kdb_curr_task(raw_smp_processor_id());
+
+	KDB_DEBUG_STATE("kdb_local 1", reason);
+	kdb_go_count = 0;
+	if (reason == KDB_REASON_DEBUG) {
+		/* special case below */
+	} else {
+		kdb_printf("\nEntering kdb (current=0x%p, pid %d) ",
+			   kdb_current, kdb_current->pid);
+#if defined(CONFIG_SMP)
+		kdb_printf("on processor %d ", raw_smp_processor_id());
+#endif
+	}
+
+	switch (reason) {
+	case KDB_REASON_DEBUG:
+	{
+		/*
+		 * If re-entering kdb after a single step
+		 * command, don't print the message.
+		 */
+		switch (db_result) {
+		case KDB_DB_BPT:
+			kdb_printf("\nEntering kdb (0x%p, pid %d) ",
+				   kdb_current, kdb_current->pid);
+#if defined(CONFIG_SMP)
+			kdb_printf("on processor %d ", raw_smp_processor_id());
+#endif
+			kdb_printf("due to Debug @ " kdb_machreg_fmt "\n",
+				   instruction_pointer(regs));
+			break;
+		case KDB_DB_SSB:
+			/*
+			 * In the midst of ssb command. Just return.
+			 */
+			KDB_DEBUG_STATE("kdb_local 3", reason);
+			return KDB_CMD_SSB;	/* Continue with SSB command */
+
+			break;
+		case KDB_DB_SS:
+			break;
+		case KDB_DB_SSBPT:
+			KDB_DEBUG_STATE("kdb_local 4", reason);
+			return 1;	/* kdba_db_trap did the work */
+		default:
+			kdb_printf("kdb: Bad result from kdba_db_trap: %d\n",
+				   db_result);
+			break;
+		}
+
+	}
+		break;
+	case KDB_REASON_ENTER:
+		if (KDB_STATE(KEYBOARD))
+			kdb_printf("due to Keyboard Entry\n");
+		else
+			kdb_printf("due to KDB_ENTER()\n");
+		break;
+	case KDB_REASON_KEYBOARD:
+		KDB_STATE_SET(KEYBOARD);
+		kdb_printf("due to Keyboard Entry\n");
+		break;
+	case KDB_REASON_ENTER_SLAVE:
+		/* drop through, slaves only get released via cpu switch */
+	case KDB_REASON_SWITCH:
+		kdb_printf("due to cpu switch\n");
+		break;
+	case KDB_REASON_OOPS:
+		kdb_printf("Oops: %s\n", kdb_diemsg);
+		kdb_printf("due to oops @ " kdb_machreg_fmt "\n",
+			   instruction_pointer(regs));
+		kdb_dumpregs(regs);
+		break;
+	case KDB_REASON_NMI:
+		kdb_printf("due to NonMaskable Interrupt @ "
+			   kdb_machreg_fmt "\n",
+			   instruction_pointer(regs));
+		kdb_dumpregs(regs);
+		break;
+	case KDB_REASON_SSTEP:
+	case KDB_REASON_BREAK:
+		kdb_printf("due to %s @ " kdb_machreg_fmt "\n",
+			   reason == KDB_REASON_BREAK ?
+			   "Breakpoint" : "SS trap", instruction_pointer(regs));
+		/*
+		 * Determine if this breakpoint is one that we
+		 * are interested in.
+		 */
+		if (db_result != KDB_DB_BPT) {
+			kdb_printf("kdb: error return from kdba_bp_trap: %d\n",
+				   db_result);
+			KDB_DEBUG_STATE("kdb_local 6", reason);
+			return 0;	/* Not for us, dismiss it */
+		}
+		break;
+	case KDB_REASON_RECURSE:
+		kdb_printf("due to Recursion @ " kdb_machreg_fmt "\n",
+			   instruction_pointer(regs));
+		break;
+	default:
+		kdb_printf("kdb: unexpected reason code: %d\n", reason);
+		KDB_DEBUG_STATE("kdb_local 8", reason);
+		return 0;	/* Not for us, dismiss it */
+	}
+
+	while (1) {
+		/*
+		 * Initialize pager context.
+		 */
+		kdb_nextline = 1;
+		KDB_STATE_CLEAR(SUPPRESS);
+
+		cmdbuf = cmd_cur;
+		*cmdbuf = '\0';
+		*(cmd_hist[cmd_head]) = '\0';
+
+		if (KDB_FLAG(ONLY_DO_DUMP)) {
+			/* kdb is off but a catastrophic error requires a dump.
+			 * Take the dump and reboot.
+			 * Turn on logging so the kdb output appears in the log
+			 * buffer in the dump.
+			 */
+			const char *setargs[] = { "set", "LOGGING", "1" };
+			kdb_set(2, setargs);
+			kdb_reboot(0, NULL);
+			/*NOTREACHED*/
+		}
+
+do_full_getstr:
+#if defined(CONFIG_SMP)
+		snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
+			 raw_smp_processor_id());
+#else
+		snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"));
+#endif
+		if (defcmd_in_progress)
+			strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
+
+		/*
+		 * Fetch command from keyboard
+		 */
+		cmdbuf = kdb_getstr(cmdbuf, CMD_BUFLEN, kdb_prompt_str);
+		if (*cmdbuf != '\n') {
+			if (*cmdbuf < 32) {
+				if (cmdptr == cmd_head) {
+					strncpy(cmd_hist[cmd_head], cmd_cur,
+						CMD_BUFLEN);
+					*(cmd_hist[cmd_head] +
+					  strlen(cmd_hist[cmd_head])-1) = '\0';
+				}
+				if (!handle_ctrl_cmd(cmdbuf))
+					*(cmd_cur+strlen(cmd_cur)-1) = '\0';
+				cmdbuf = cmd_cur;
+				goto do_full_getstr;
+			} else {
+				strncpy(cmd_hist[cmd_head], cmd_cur,
+					CMD_BUFLEN);
+			}
+
+			cmd_head = (cmd_head+1) % KDB_CMD_HISTORY_COUNT;
+			if (cmd_head == cmd_tail)
+				cmd_tail = (cmd_tail+1) % KDB_CMD_HISTORY_COUNT;
+		}
+
+		cmdptr = cmd_head;
+		diag = kdb_parse(cmdbuf);
+		if (diag == KDB_NOTFOUND) {
+			kdb_printf("Unknown kdb command: '%s'\n", cmdbuf);
+			diag = 0;
+		}
+		if (diag == KDB_CMD_GO
+		 || diag == KDB_CMD_CPU
+		 || diag == KDB_CMD_SS
+		 || diag == KDB_CMD_SSB
+		 || diag == KDB_CMD_KGDB)
+			break;
+
+		if (diag)
+			kdb_cmderror(diag);
+	}
+	KDB_DEBUG_STATE("kdb_local 9", diag);
+	return diag;
+}
+
+
+/*
+ * kdb_print_state - Print the state data for the current processor
+ *	for debugging.
+ * Inputs:
+ *	text		Identifies the debug point
+ *	value		Any integer value to be printed, e.g. reason code.
+ */
+void kdb_print_state(const char *text, int value)
+{
+	kdb_printf("state: %s cpu %d value %d initial %d state %x\n",
+		   text, raw_smp_processor_id(), value, kdb_initial_cpu,
+		   kdb_state);
+}
+
+/*
+ * kdb_main_loop - After initial setup and assignment of the
+ *	controlling cpu, all cpus are in this loop.  One cpu is in
+ *	control and will issue the kdb prompt; the others will spin
+ *	until 'go' or cpu switch.
+ *
+ *	To get a consistent view of the kernel stacks for all
+ *	processes, this routine is invoked from the main kdb code via
+ *	an architecture specific routine.  kdba_main_loop is
+ *	responsible for making the kernel stacks consistent for all
+ *	processes; there should be no difference between a blocked
+ *	process and a running process as far as kdb is concerned.
+ * Inputs:
+ *	reason		The reason KDB was invoked
+ *	error		The hardware-defined error code
+ *	reason2		kdb's current reason code.
+ *			Initially error but can change
+ *			according to kdb state.
+ *	db_result	Result code from break or debug point.
+ *	regs		The exception frame at time of fault/breakpoint.
+ *			should always be valid.
+ * Returns:
+ *	0	KDB was invoked for an event for which it was not responsible.
+ *	1	KDB handled the event for which it was invoked.
+ */
+int kdb_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error,
+	      kdb_dbtrap_t db_result, struct pt_regs *regs)
+{
+	int result = 1;
+	/* Stay in kdb() until 'go', 'ss[b]' or an error */
+	while (1) {
+		/*
+		 * All processors except the one that is in control
+		 * will spin here.
+		 */
+		KDB_DEBUG_STATE("kdb_main_loop 1", reason);
+		while (KDB_STATE(HOLD_CPU)) {
+			/* state KDB is turned off by kdb_cpu to see if the
+			 * other cpus are still live, each cpu in this loop
+			 * other cpus are still live; each cpu in this loop
+			 */
+			if (!KDB_STATE(KDB))
+				KDB_STATE_SET(KDB);
+		}
+
+		KDB_STATE_CLEAR(SUPPRESS);
+		KDB_DEBUG_STATE("kdb_main_loop 2", reason);
+		if (KDB_STATE(LEAVING))
+			break;	/* Another cpu said 'go' */
+		/* Still using kdb, this processor is in control */
+		result = kdb_local(reason2, error, regs, db_result);
+		KDB_DEBUG_STATE("kdb_main_loop 3", result);
+
+		if (result == KDB_CMD_CPU)
+			break;
+
+		if (result == KDB_CMD_SS) {
+			KDB_STATE_SET(DOING_SS);
+			break;
+		}
+
+		if (result == KDB_CMD_SSB) {
+			KDB_STATE_SET(DOING_SS);
+			KDB_STATE_SET(DOING_SSB);
+			break;
+		}
+
+		if (result == KDB_CMD_KGDB) {
+			if (!(KDB_STATE(DOING_KGDB) || KDB_STATE(DOING_KGDB2)))
+				kdb_printf("Entering please attach debugger "
+					   "or use $D#44+ or $3#33\n");
+			break;
+		}
+		if (result && result != 1 && result != KDB_CMD_GO)
+			kdb_printf("\nUnexpected kdb_local return code %d\n",
+				   result);
+		KDB_DEBUG_STATE("kdb_main_loop 4", reason);
+		break;
+	}
+	if (KDB_STATE(DOING_SS))
+		KDB_STATE_CLEAR(SSBPT);
+
+	return result;
+}
+
+/*
+ * kdb_mdr - This function implements the guts of the 'mdr', memory
+ * read command.
+ *	mdr  <addr arg>,<byte count>
+ * Inputs:
+ *	addr	Start address
+ *	count	Number of bytes
+ * Returns:
+ *	Always 0.  Any errors are detected and printed by kdb_getarea.
+ */
+static int kdb_mdr(unsigned long addr, unsigned int count)
+{
+	unsigned char c;
+	while (count--) {
+		if (kdb_getarea(c, addr))
+			return 0;
+		kdb_printf("%02x", c);
+		addr++;
+	}
+	kdb_printf("\n");
+	return 0;
+}
+
+/*
+ * kdb_md - This function implements the 'md', 'md1', 'md2', 'md4',
+ *	'md8', 'mdr' and 'mds' commands.
+ *
+ *	md|mds  [<addr arg> [<line count> [<radix>]]]
+ *	mdWcN	[<addr arg> [<line count> [<radix>]]]
+ *		where W is the width (1, 2, 4 or 8) and N is the count,
+ *		e.g., md1c20 reads 20 bytes, 1 at a time.
+ *	mdr  <addr arg>,<byte count>
+ */
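+/*
+ * Examples (illustrative; the address is a placeholder):
+ *   md 0xc0254010        display using MDCOUNT/RADIX/BYTESPERWORD defaults
+ *   md2c8 0xc0254010     eight 2-byte words starting at that address
+ *   mdr 0xc0254010 16    sixteen raw bytes with no formatting
+ */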
+static void kdb_md_line(const char *fmtstr, unsigned long addr,
+			int symbolic, int nosect, int bytesperword,
+			int num, int repeat, int phys)
+{
+	/* print just one line of data */
+	kdb_symtab_t symtab;
+	char cbuf[32];
+	char *c = cbuf;
+	int i;
+	unsigned long word;
+
+	memset(cbuf, '\0', sizeof(cbuf));
+	if (phys)
+		kdb_printf("phys " kdb_machreg_fmt0 " ", addr);
+	else
+		kdb_printf(kdb_machreg_fmt0 " ", addr);
+
+	for (i = 0; i < num && repeat--; i++) {
+		if (phys) {
+			if (kdb_getphysword(&word, addr, bytesperword))
+				break;
+		} else if (kdb_getword(&word, addr, bytesperword))
+			break;
+		kdb_printf(fmtstr, word);
+		if (symbolic)
+			kdbnearsym(word, &symtab);
+		else
+			memset(&symtab, 0, sizeof(symtab));
+		if (symtab.sym_name) {
+			kdb_symbol_print(word, &symtab, 0);
+			if (!nosect) {
+				kdb_printf("\n");
+				kdb_printf("                       %s %s "
+					   kdb_machreg_fmt " "
+					   kdb_machreg_fmt " "
+					   kdb_machreg_fmt, symtab.mod_name,
+					   symtab.sec_name, symtab.sec_start,
+					   symtab.sym_start, symtab.sym_end);
+			}
+			addr += bytesperword;
+		} else {
+			union {
+				u64 word;
+				unsigned char c[8];
+			} wc;
+			unsigned char *cp;
+#ifdef	__BIG_ENDIAN
+			cp = wc.c + 8 - bytesperword;
+#else
+			cp = wc.c;
+#endif
+			wc.word = word;
+#define printable_char(c) \
+	({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; })
+			switch (bytesperword) {
+			case 8:
+				*c++ = printable_char(*cp++);
+				*c++ = printable_char(*cp++);
+				*c++ = printable_char(*cp++);
+				*c++ = printable_char(*cp++);
+				addr += 4;
+			case 4:
+				*c++ = printable_char(*cp++);
+				*c++ = printable_char(*cp++);
+				addr += 2;
+			case 2:
+				*c++ = printable_char(*cp++);
+				addr++;
+			case 1:
+				*c++ = printable_char(*cp++);
+				addr++;
+				break;
+			}
+#undef printable_char
+		}
+	}
+	kdb_printf("%*s %s\n", (int)((num-i)*(2*bytesperword + 1)+1),
+		   " ", cbuf);
+}
+
+static int kdb_md(int argc, const char **argv)
+{
+	static unsigned long last_addr;
+	static int last_radix, last_bytesperword, last_repeat;
+	int radix = 16, mdcount = 8, bytesperword = KDB_WORD_SIZE, repeat;
+	int nosect = 0;
+	char fmtchar, fmtstr[64];
+	unsigned long addr;
+	unsigned long word;
+	long offset = 0;
+	int symbolic = 0;
+	int valid = 0;
+	int phys = 0;
+
+	kdbgetintenv("MDCOUNT", &mdcount);
+	kdbgetintenv("RADIX", &radix);
+	kdbgetintenv("BYTESPERWORD", &bytesperword);
+
+	/* Assume 'md <addr>' and start with environment values */
+	repeat = mdcount * 16 / bytesperword;
+
+	if (strcmp(argv[0], "mdr") == 0) {
+		if (argc != 2)
+			return KDB_ARGCOUNT;
+		valid = 1;
+	} else if (isdigit(argv[0][2])) {
+		bytesperword = (int)(argv[0][2] - '0');
+		if (bytesperword == 0) {
+			bytesperword = last_bytesperword;
+			if (bytesperword == 0)
+				bytesperword = 4;
+		}
+		last_bytesperword = bytesperword;
+		repeat = mdcount * 16 / bytesperword;
+		if (!argv[0][3])
+			valid = 1;
+		else if (argv[0][3] == 'c' && argv[0][4]) {
+			char *p;
+			repeat = simple_strtoul(argv[0] + 4, &p, 10);
+			mdcount = ((repeat * bytesperword) + 15) / 16;
+			valid = !*p;
+		}
+		last_repeat = repeat;
+	} else if (strcmp(argv[0], "md") == 0)
+		valid = 1;
+	else if (strcmp(argv[0], "mds") == 0)
+		valid = 1;
+	else if (strcmp(argv[0], "mdp") == 0) {
+		phys = valid = 1;
+	}
+	if (!valid)
+		return KDB_NOTFOUND;
+
+	if (argc == 0) {
+		if (last_addr == 0)
+			return KDB_ARGCOUNT;
+		addr = last_addr;
+		radix = last_radix;
+		bytesperword = last_bytesperword;
+		repeat = last_repeat;
+		mdcount = ((repeat * bytesperword) + 15) / 16;
+	}
+
+	if (argc) {
+		unsigned long val;
+		int diag, nextarg = 1;
+		diag = kdbgetaddrarg(argc, argv, &nextarg, &addr,
+				     &offset, NULL);
+		if (diag)
+			return diag;
+		if (argc > nextarg+2)
+			return KDB_ARGCOUNT;
+
+		if (argc >= nextarg) {
+			diag = kdbgetularg(argv[nextarg], &val);
+			if (!diag) {
+				mdcount = (int) val;
+				repeat = mdcount * 16 / bytesperword;
+			}
+		}
+		if (argc >= nextarg+1) {
+			diag = kdbgetularg(argv[nextarg+1], &val);
+			if (!diag)
+				radix = (int) val;
+		}
+	}
+
+	if (strcmp(argv[0], "mdr") == 0)
+		return kdb_mdr(addr, mdcount);
+
+	switch (radix) {
+	case 10:
+		fmtchar = 'd';
+		break;
+	case 16:
+		fmtchar = 'x';
+		break;
+	case 8:
+		fmtchar = 'o';
+		break;
+	default:
+		return KDB_BADRADIX;
+	}
+
+	last_radix = radix;
+
+	if (bytesperword > KDB_WORD_SIZE)
+		return KDB_BADWIDTH;
+
+	switch (bytesperword) {
+	case 8:
+		sprintf(fmtstr, "%%16.16l%c ", fmtchar);
+		break;
+	case 4:
+		sprintf(fmtstr, "%%8.8l%c ", fmtchar);
+		break;
+	case 2:
+		sprintf(fmtstr, "%%4.4l%c ", fmtchar);
+		break;
+	case 1:
+		sprintf(fmtstr, "%%2.2l%c ", fmtchar);
+		break;
+	default:
+		return KDB_BADWIDTH;
+	}
+
+	last_repeat = repeat;
+	last_bytesperword = bytesperword;
+
+	if (strcmp(argv[0], "mds") == 0) {
+		symbolic = 1;
+		/* Do not save these changes as last_*; they are temporary mds
+		 * overrides.
+		 */
+		bytesperword = KDB_WORD_SIZE;
+		repeat = mdcount;
+		kdbgetintenv("NOSECT", &nosect);
+	}
+
+	/* Round address down modulo BYTESPERWORD */
+
+	addr &= ~(bytesperword-1);
+
+	while (repeat > 0) {
+		unsigned long a;
+		int n, z, num = (symbolic ? 1 : (16 / bytesperword));
+
+		if (KDB_FLAG(CMD_INTERRUPT))
+			return 0;
+		for (a = addr, z = 0; z < repeat; a += bytesperword, ++z) {
+			if (phys) {
+				if (kdb_getphysword(&word, a, bytesperword)
+						|| word)
+					break;
+			} else if (kdb_getword(&word, a, bytesperword) || word)
+				break;
+		}
+		n = min(num, repeat);
+		kdb_md_line(fmtstr, addr, symbolic, nosect, bytesperword,
+			    num, repeat, phys);
+		addr += bytesperword * n;
+		repeat -= n;
+		z = (z + num - 1) / num;
+		if (z > 2) {
+			int s = num * (z-2);
+			kdb_printf(kdb_machreg_fmt0 "-" kdb_machreg_fmt0
+				   " zero suppressed\n",
+				addr, addr + bytesperword * s - 1);
+			addr += bytesperword * s;
+			repeat -= s;
+		}
+	}
+	last_addr = addr;
+
+	return 0;
+}
+
+/*
+ * kdb_mm - This function implements the 'mm' command.
+ *	mm address-expression new-value
+ * Remarks:
+ *	mm works on machine words; mmW works on bytes.
+ */
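+/*
+ * Examples (illustrative; the address is a placeholder):
+ *   mm 0xc0254010 0x1     write one machine word
+ *   mm1 0xc0254010 0xff   write a single byte
+ */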
+static int kdb_mm(int argc, const char **argv)
+{
+	int diag;
+	unsigned long addr;
+	long offset = 0;
+	unsigned long contents;
+	int nextarg;
+	int width;
+
+	if (argv[0][2] && !isdigit(argv[0][2]))
+		return KDB_NOTFOUND;
+
+	if (argc < 2)
+		return KDB_ARGCOUNT;
+
+	nextarg = 1;
+	diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
+	if (diag)
+		return diag;
+
+	if (nextarg > argc)
+		return KDB_ARGCOUNT;
+	diag = kdbgetaddrarg(argc, argv, &nextarg, &contents, NULL, NULL);
+	if (diag)
+		return diag;
+
+	if (nextarg != argc + 1)
+		return KDB_ARGCOUNT;
+
+	width = argv[0][2] ? (argv[0][2] - '0') : (KDB_WORD_SIZE);
+	diag = kdb_putword(addr, contents, width);
+	if (diag)
+		return diag;
+
+	kdb_printf(kdb_machreg_fmt " = " kdb_machreg_fmt "\n", addr, contents);
+
+	return 0;
+}
+
+/*
+ * kdb_go - This function implements the 'go' command.
+ *	go [address-expression]
+ */
+static int kdb_go(int argc, const char **argv)
+{
+	unsigned long addr;
+	int diag;
+	int nextarg;
+	long offset;
+
+	if (argc == 1) {
+		if (raw_smp_processor_id() != kdb_initial_cpu) {
+			kdb_printf("go <address> must be issued from the "
+				   "initial cpu, do cpu %d first\n",
+				   kdb_initial_cpu);
+			return KDB_ARGCOUNT;
+		}
+		nextarg = 1;
+		diag = kdbgetaddrarg(argc, argv, &nextarg,
+				     &addr, &offset, NULL);
+		if (diag)
+			return diag;
+	} else if (argc) {
+		return KDB_ARGCOUNT;
+	}
+
+	diag = KDB_CMD_GO;
+	if (KDB_FLAG(CATASTROPHIC)) {
+		kdb_printf("Catastrophic error detected\n");
+		kdb_printf("kdb_continue_catastrophic=%d, ",
+			kdb_continue_catastrophic);
+		if (kdb_continue_catastrophic == 0 && kdb_go_count++ == 0) {
+			kdb_printf("type go a second time if you really want "
+				   "to continue\n");
+			return 0;
+		}
+		if (kdb_continue_catastrophic == 2) {
+			kdb_printf("forcing reboot\n");
+			kdb_reboot(0, NULL);
+		}
+		kdb_printf("attempting to continue\n");
+	}
+	return diag;
+}
+
+/*
+ * kdb_rd - This function implements the 'rd' command.
+ */
+static int kdb_rd(int argc, const char **argv)
+{
+	int diag = kdb_check_regs();
+	if (diag)
+		return diag;
+
+	kdb_dumpregs(kdb_current_regs);
+	return 0;
+}
+
+/*
+ * kdb_rm - This function implements the 'rm' (register modify)  command.
+ *	rm register-name new-contents
+ * Remarks:
+ *	Currently doesn't allow modification of control or
+ *	debug registers.
+ */
+static int kdb_rm(int argc, const char **argv)
+{
+	int diag;
+	int ind = 0;
+	unsigned long contents;
+
+	if (argc != 2)
+		return KDB_ARGCOUNT;
+	/*
+	 * Allow presence or absence of leading '%' symbol.
+	 */
+	if (argv[1][0] == '%')
+		ind = 1;
+
+	diag = kdbgetularg(argv[2], &contents);
+	if (diag)
+		return diag;
+
+	diag = kdb_check_regs();
+	if (diag)
+		return diag;
+	kdb_printf("ERROR: Register set currently not implemented\n");
+	return 0;
+}
+
+#if defined(CONFIG_MAGIC_SYSRQ)
+/*
+ * kdb_sr - This function implements the 'sr' (SYSRQ key) command
+ *	which interfaces to the soi-disant MAGIC SYSRQ functionality.
+ *		sr <magic-sysrq-code>
+ */
+static int kdb_sr(int argc, const char **argv)
+{
+	if (argc != 1)
+		return KDB_ARGCOUNT;
+	sysrq_toggle_support(1);
+	kdb_trap_printk++;
+	handle_sysrq(*argv[1], NULL);
+	kdb_trap_printk--;
+
+	return 0;
+}
+#endif	/* CONFIG_MAGIC_SYSRQ */
+
+/*
+ * kdb_ef - This function implements the 'regs' (display exception
+ *	frame) command.  This command takes an address and expects to
+ *	find an exception frame at that address, then formats and prints
+ *	it.
+ *		regs address-expression
+ * Remarks:
+ *	Not done yet.
+ */
+static int kdb_ef(int argc, const char **argv)
+{
+	int diag;
+	unsigned long addr;
+	long offset;
+	int nextarg;
+
+	if (argc != 1)
+		return KDB_ARGCOUNT;
+
+	nextarg = 1;
+	diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
+	if (diag)
+		return diag;
+	show_regs((struct pt_regs *)addr);
+	return 0;
+}
+
+#if defined(CONFIG_MODULES)
+/* modules using other modules */
+struct module_use {
+	struct list_head list;
+	struct module *module_which_uses;
+};
+
+/*
+ * kdb_lsmod - This function implements the 'lsmod' command.  Lists
+ *	currently loaded kernel modules.
+ *	Mostly taken from userland lsmod.
+ */
+static int kdb_lsmod(int argc, const char **argv)
+{
+	struct module *mod;
+
+	if (argc != 0)
+		return KDB_ARGCOUNT;
+
+	kdb_printf("Module                  Size  modstruct     Used by\n");
+	list_for_each_entry(mod, kdb_modules, list) {
+
+		kdb_printf("%-20s%8u  0x%p ", mod->name,
+			   mod->core_size, (void *)mod);
+#ifdef CONFIG_MODULE_UNLOAD
+		kdb_printf("%4d ", module_refcount(mod));
+#endif
+		if (mod->state == MODULE_STATE_GOING)
+			kdb_printf(" (Unloading)");
+		else if (mod->state == MODULE_STATE_COMING)
+			kdb_printf(" (Loading)");
+		else
+			kdb_printf(" (Live)");
+
+#ifdef CONFIG_MODULE_UNLOAD
+		{
+			struct module_use *use;
+			kdb_printf(" [ ");
+			list_for_each_entry(use, &mod->modules_which_use_me,
+					    list)
+				kdb_printf("%s ", use->module_which_uses->name);
+			kdb_printf("]\n");
+		}
+#endif
+	}
+
+	return 0;
+}
+
+#endif	/* CONFIG_MODULES */
+
+/*
+ * kdb_env - This function implements the 'env' command.  Display the
+ *	current environment variables.
+ */
+
+static int kdb_env(int argc, const char **argv)
+{
+	int i;
+
+	for (i = 0; i < __nenv; i++) {
+		if (__env[i])
+			kdb_printf("%s\n", __env[i]);
+	}
+
+	if (KDB_DEBUG(MASK))
+		kdb_printf("KDBFLAGS=0x%x\n", kdb_flags);
+
+	return 0;
+}
+
+#ifdef CONFIG_PRINTK
+/*
+ * kdb_dmesg - This function implements the 'dmesg' command to display
+ *	the contents of the syslog buffer.
+ *		dmesg [lines] [adjust]
+ */
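+/*
+ * Examples (illustrative):
+ *   dmesg 20       the most recent 20 lines
+ *   dmesg -20      the first 20 lines in the buffer
+ *   dmesg 20 10    20 lines, ending 10 lines before the most recent
+ */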
+static int kdb_dmesg(int argc, const char **argv)
+{
+	char *syslog_data[4], *start, *end, c = '\0', *p;
+	int diag, logging, logsize, lines = 0, adjust = 0, n;
+
+	if (argc > 2)
+		return KDB_ARGCOUNT;
+	if (argc) {
+		char *cp;
+		lines = simple_strtol(argv[1], &cp, 0);
+		if (*cp)
+			lines = 0;
+		if (argc > 1) {
+			adjust = simple_strtoul(argv[2], &cp, 0);
+			if (*cp || adjust < 0)
+				adjust = 0;
+		}
+	}
+
+	/* disable LOGGING if set */
+	diag = kdbgetintenv("LOGGING", &logging);
+	if (!diag && logging) {
+		const char *setargs[] = { "set", "LOGGING", "0" };
+		kdb_set(2, setargs);
+	}
+
+	/* syslog_data[0,1] physical start, end+1.  syslog_data[2,3]
+	 * logical start, end+1. */
+	kdb_syslog_data(syslog_data);
+	if (syslog_data[2] == syslog_data[3])
+		return 0;
+	logsize = syslog_data[1] - syslog_data[0];
+	start = syslog_data[2];
+	end = syslog_data[3];
+#define KDB_WRAP(p) (((p - syslog_data[0]) % logsize) + syslog_data[0])
+	for (n = 0, p = start; p < end; ++p) {
+		c = *KDB_WRAP(p);
+		if (c == '\n')
+			++n;
+	}
+	if (c != '\n')
+		++n;
+	if (lines < 0) {
+		if (adjust >= n)
+			kdb_printf("buffer only contains %d lines, nothing "
+				   "printed\n", n);
+		else if (adjust - lines >= n)
+			kdb_printf("buffer only contains %d lines, last %d "
+				   "lines printed\n", n, n - adjust);
+		if (adjust) {
+			for (; start < end && adjust; ++start) {
+				if (*KDB_WRAP(start) == '\n')
+					--adjust;
+			}
+			if (start < end)
+				++start;
+		}
+		for (p = start; p < end && lines; ++p) {
+			if (*KDB_WRAP(p) == '\n')
+				++lines;
+		}
+		end = p;
+	} else if (lines > 0) {
+		int skip = n - (adjust + lines);
+		if (adjust >= n) {
+			kdb_printf("buffer only contains %d lines, "
+				   "nothing printed\n", n);
+			skip = n;
+		} else if (skip < 0) {
+			lines += skip;
+			skip = 0;
+			kdb_printf("buffer only contains %d lines, first "
+				   "%d lines printed\n", n, lines);
+		}
+		for (; start < end && skip; ++start) {
+			if (*KDB_WRAP(start) == '\n')
+				--skip;
+		}
+		for (p = start; p < end && lines; ++p) {
+			if (*KDB_WRAP(p) == '\n')
+				--lines;
+		}
+		end = p;
+	}
+	/* Do a line at a time (max 200 chars) to reduce protocol overhead */
+	c = '\n';
+	while (start != end) {
+		char buf[201];
+		p = buf;
+		if (KDB_FLAG(CMD_INTERRUPT))
+			return 0;
+		while (start < end && (c = *KDB_WRAP(start)) &&
+		       (p - buf) < sizeof(buf)-1) {
+			++start;
+			*p++ = c;
+			if (c == '\n')
+				break;
+		}
+		*p = '\0';
+		kdb_printf("%s", buf);
+	}
+	if (c != '\n')
+		kdb_printf("\n");
+
+	return 0;
+}
+#endif /* CONFIG_PRINTK */
+/*
+ * kdb_cpu - This function implements the 'cpu' command.
+ *	cpu	[<cpunum>]
+ * Returns:
+ *	KDB_CMD_CPU for success, a kdb diagnostic if error
+ */
+static void kdb_cpu_status(void)
+{
+	int i, start_cpu, first_print = 1;
+	char state, prev_state = '?';
+
+	kdb_printf("Currently on cpu %d\n", raw_smp_processor_id());
+	kdb_printf("Available cpus: ");
+	for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
+		if (!cpu_online(i)) {
+			state = 'F';	/* cpu is offline */
+		} else {
+			state = ' ';	/* cpu is responding to kdb */
+			if (kdb_task_state_char(KDB_TSK(i)) == 'I')
+				state = 'I';	/* idle task */
+		}
+		if (state != prev_state) {
+			if (prev_state != '?') {
+				if (!first_print)
+					kdb_printf(", ");
+				first_print = 0;
+				kdb_printf("%d", start_cpu);
+				if (start_cpu < i-1)
+					kdb_printf("-%d", i-1);
+				if (prev_state != ' ')
+					kdb_printf("(%c)", prev_state);
+			}
+			prev_state = state;
+			start_cpu = i;
+		}
+	}
+	/* print the trailing cpus, ignoring them if they are all offline */
+	if (prev_state != 'F') {
+		if (!first_print)
+			kdb_printf(", ");
+		kdb_printf("%d", start_cpu);
+		if (start_cpu < i-1)
+			kdb_printf("-%d", i-1);
+		if (prev_state != ' ')
+			kdb_printf("(%c)", prev_state);
+	}
+	kdb_printf("\n");
+}
+
+static int kdb_cpu(int argc, const char **argv)
+{
+	unsigned long cpunum;
+	int diag;
+
+	if (argc == 0) {
+		kdb_cpu_status();
+		return 0;
+	}
+
+	if (argc != 1)
+		return KDB_ARGCOUNT;
+
+	diag = kdbgetularg(argv[1], &cpunum);
+	if (diag)
+		return diag;
+
+	/*
+	 * Validate cpunum
+	 */
+	if ((cpunum > NR_CPUS) || !cpu_online(cpunum))
+		return KDB_BADCPUNUM;
+
+	dbg_switch_cpu = cpunum;
+
+	/*
+	 * Switch to other cpu
+	 */
+	return KDB_CMD_CPU;
+}
+
+/* The user may not realize that ps/bta with no parameters does not print idle
+ * or sleeping system daemon processes, so tell them how many were suppressed.
+ */
+void kdb_ps_suppressed(void)
+{
+	int idle = 0, daemon = 0;
+	unsigned long mask_I = kdb_task_state_string("I"),
+		      mask_M = kdb_task_state_string("M");
+	unsigned long cpu;
+	const struct task_struct *p, *g;
+	for_each_online_cpu(cpu) {
+		p = kdb_curr_task(cpu);
+		if (kdb_task_state(p, mask_I))
+			++idle;
+	}
+	kdb_do_each_thread(g, p) {
+		if (kdb_task_state(p, mask_M))
+			++daemon;
+	} kdb_while_each_thread(g, p);
+	if (idle || daemon) {
+		if (idle)
+			kdb_printf("%d idle process%s (state I)%s\n",
+				   idle, idle == 1 ? "" : "es",
+				   daemon ? " and " : "");
+		if (daemon)
+			kdb_printf("%d sleeping system daemon (state M) "
+				   "process%s", daemon,
+				   daemon == 1 ? "" : "es");
+		kdb_printf(" suppressed,\nuse 'ps A' to see all.\n");
+	}
+}
+
+/*
+ * kdb_ps - This function implements the 'ps' command which shows a
+ *	list of the active processes.
+ *		ps [DRSTCZEUIMA]   All processes, optionally filtered by state
+ */
+void kdb_ps1(const struct task_struct *p)
+{
+	int cpu;
+	unsigned long tmp;
+
+	if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long)))
+		return;
+
+	cpu = kdb_process_cpu(p);
+	kdb_printf("0x%p %8d %8d  %d %4d   %c  0x%p %c%s\n",
+		   (void *)p, p->pid, p->parent->pid,
+		   kdb_task_has_cpu(p), kdb_process_cpu(p),
+		   kdb_task_state_char(p),
+		   (void *)(&p->thread),
+		   p == kdb_curr_task(raw_smp_processor_id()) ? '*' : ' ',
+		   p->comm);
+	if (kdb_task_has_cpu(p)) {
+		if (!KDB_TSK(cpu)) {
+			kdb_printf("  Error: no saved data for this cpu\n");
+		} else {
+			if (KDB_TSK(cpu) != p)
+				kdb_printf("  Error: does not match running "
+				   "process table (0x%p)\n", KDB_TSK(cpu));
+		}
+	}
+}
+
+static int kdb_ps(int argc, const char **argv)
+{
+	struct task_struct *g, *p;
+	unsigned long mask, cpu;
+
+	if (argc == 0)
+		kdb_ps_suppressed();
+	kdb_printf("%-*s      Pid   Parent [*] cpu State %-*s Command\n",
+		(int)(2*sizeof(void *))+2, "Task Addr",
+		(int)(2*sizeof(void *))+2, "Thread");
+	mask = kdb_task_state_string(argc ? argv[1] : NULL);
+	/* Run the active tasks first */
+	for_each_online_cpu(cpu) {
+		if (KDB_FLAG(CMD_INTERRUPT))
+			return 0;
+		p = kdb_curr_task(cpu);
+		if (kdb_task_state(p, mask))
+			kdb_ps1(p);
+	}
+	kdb_printf("\n");
+	/* Now the real tasks */
+	kdb_do_each_thread(g, p) {
+		if (KDB_FLAG(CMD_INTERRUPT))
+			return 0;
+		if (kdb_task_state(p, mask))
+			kdb_ps1(p);
+	} kdb_while_each_thread(g, p);
+
+	return 0;
+}
+
+/*
+ * kdb_pid - This function implements the 'pid' command which switches
+ *	the currently active process.
+ *		pid [<pid> | R]
+ */
+static int kdb_pid(int argc, const char **argv)
+{
+	struct task_struct *p;
+	unsigned long val;
+	int diag;
+
+	if (argc > 1)
+		return KDB_ARGCOUNT;
+
+	if (argc) {
+		if (strcmp(argv[1], "R") == 0) {
+			p = KDB_TSK(kdb_initial_cpu);
+		} else {
+			diag = kdbgetularg(argv[1], &val);
+			if (diag)
+				return KDB_BADINT;
+
+			p = find_task_by_pid_ns((pid_t)val, &init_pid_ns);
+			if (!p) {
+				kdb_printf("No task with pid=%d\n", (pid_t)val);
+				return 0;
+			}
+		}
+		kdb_set_current_task(p);
+	}
+	kdb_printf("KDB current process is %s(pid=%d)\n",
+		   kdb_current_task->comm,
+		   kdb_current_task->pid);
+
+	return 0;
+}
+
+/*
+ * kdb_ll - This function implements the 'll' command which follows a
+ *	linked list and executes an arbitrary command for each
+ *	element.
+ */
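+/*
+ * Example (illustrative; address and offset are placeholders): walk a
+ * NULL-terminated list whose link pointer lives at byte offset 8 and
+ * run 'md' on every element:
+ *   ll 0xc0254010 8 md
+ */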
+static int kdb_ll(int argc, const char **argv)
+{
+	int diag;
+	unsigned long addr;
+	long offset = 0;
+	unsigned long va;
+	unsigned long linkoffset;
+	int nextarg;
+	const char *command;
+
+	if (argc != 3)
+		return KDB_ARGCOUNT;
+
+	nextarg = 1;
+	diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
+	if (diag)
+		return diag;
+
+	diag = kdbgetularg(argv[2], &linkoffset);
+	if (diag)
+		return diag;
+
+	/*
+	 * Using the starting address as
+	 * the first element in the list, and assuming that
+	 * the list ends with a null pointer.
+	 */
+
+	va = addr;
+	command = kdb_strdup(argv[3], GFP_KDB);
+	if (!command) {
+		kdb_printf("%s: cannot duplicate command\n", __func__);
+		return 0;
+	}
+	/* Recursive use of kdb_parse, do not use argv after this point */
+	argv = NULL;
+
+	while (va) {
+		char buf[80];
+
+		sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va);
+		diag = kdb_parse(buf);
+		if (diag)
+			return diag;
+
+		addr = va + linkoffset;
+		if (kdb_getword(&va, addr, sizeof(va)))
+			return 0;
+	}
+	kfree(command);
+
+	return 0;
+}
+
+static int kdb_kgdb(int argc, const char **argv)
+{
+	return KDB_CMD_KGDB;
+}
+
+/*
+ * kdb_help - This function implements the 'help' and '?' commands.
+ */
+static int kdb_help(int argc, const char **argv)
+{
+	kdbtab_t *kt;
+	int i;
+
+	kdb_printf("%-15.15s %-20.20s %s\n", "Command", "Usage", "Description");
+	kdb_printf("-----------------------------"
+		   "-----------------------------\n");
+	for_each_kdbcmd(kt, i) {
+		if (kt->cmd_name)
+			kdb_printf("%-15.15s %-20.20s %s\n", kt->cmd_name,
+				   kt->cmd_usage, kt->cmd_help);
+		if (KDB_FLAG(CMD_INTERRUPT))
+			return 0;
+	}
+	return 0;
+}
+
+/*
+ * kdb_kill - This function implements the 'kill' commands.
+ */
+static int kdb_kill(int argc, const char **argv)
+{
+	long sig, pid;
+	char *endp;
+	struct task_struct *p;
+	struct siginfo info;
+
+	if (argc != 2)
+		return KDB_ARGCOUNT;
+
+	sig = simple_strtol(argv[1], &endp, 0);
+	if (*endp)
+		return KDB_BADINT;
+	if (sig >= 0) {
+		kdb_printf("Invalid signal parameter: <-signal>\n");
+		return 0;
+	}
+	sig = -sig;
+
+	pid = simple_strtol(argv[2], &endp, 0);
+	if (*endp)
+		return KDB_BADINT;
+	if (pid <= 0) {
+		kdb_printf("Process ID must be larger than 0.\n");
+		return 0;
+	}
+
+	/* Find the process. */
+	p = find_task_by_pid_ns(pid, &init_pid_ns);
+	if (!p) {
+		kdb_printf("The specified process was not found.\n");
+		return 0;
+	}
+	p = p->group_leader;
+	info.si_signo = sig;
+	info.si_errno = 0;
+	info.si_code = SI_USER;
+	info.si_pid = pid;  /* same capabilities as process being signalled */
+	info.si_uid = 0;    /* kdb has root authority */
+	kdb_send_sig_info(p, &info);
+	return 0;
+}
+
+struct kdb_tm {
+	int tm_sec;	/* seconds */
+	int tm_min;	/* minutes */
+	int tm_hour;	/* hours */
+	int tm_mday;	/* day of the month */
+	int tm_mon;	/* month */
+	int tm_year;	/* year */
+};
+
+static void kdb_gmtime(struct timespec *tv, struct kdb_tm *tm)
+{
+	/* This will work from 1970 to 2099; 2100 is not a leap year */
+	static int mon_day[] = { 31, 29, 31, 30, 31, 30, 31,
+				 31, 30, 31, 30, 31 };
+	memset(tm, 0, sizeof(*tm));
+	tm->tm_sec  = tv->tv_sec % (24 * 60 * 60);
+	tm->tm_mday = tv->tv_sec / (24 * 60 * 60) +
+		(2 * 365 + 1); /* shift base from 1970 to 1968 */
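+	/* 1968 is a leap year, so with the base shifted every 4-year
+	 * block of (4*365+1) days starts with a leap year; mon_day[]
+	 * with February = 29 is then correct for the first year of
+	 * each block.
+	 */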
+	tm->tm_min =  tm->tm_sec / 60 % 60;
+	tm->tm_hour = tm->tm_sec / 60 / 60;
+	tm->tm_sec =  tm->tm_sec % 60;
+	tm->tm_year = 68 + 4*(tm->tm_mday / (4*365+1));
+	tm->tm_mday %= (4*365+1);
+	mon_day[1] = 29;
+	while (tm->tm_mday >= mon_day[tm->tm_mon]) {
+		tm->tm_mday -= mon_day[tm->tm_mon];
+		if (++tm->tm_mon == 12) {
+			tm->tm_mon = 0;
+			++tm->tm_year;
+			mon_day[1] = 28;
+		}
+	}
+	++tm->tm_mday;
+}
+
+/*
+ * Most of this code has been lifted from kernel/timer.c::sys_sysinfo().
+ * I cannot call that code directly from kdb; it has an unconditional
+ * cli()/sti() and calls routines that take locks which can stop the debugger.
+ */
+static void kdb_sysinfo(struct sysinfo *val)
+{
+	struct timespec uptime;
+	do_posix_clock_monotonic_gettime(&uptime);
+	memset(val, 0, sizeof(*val));
+	val->uptime = uptime.tv_sec;
+	val->loads[0] = avenrun[0];
+	val->loads[1] = avenrun[1];
+	val->loads[2] = avenrun[2];
+	val->procs = nr_threads-1;
+	si_meminfo(val);
+
+	return;
+}
+
+/*
+ * kdb_summary - This function implements the 'summary' command.
+ */
+static int kdb_summary(int argc, const char **argv)
+{
+	struct kdb_tm tm;
+	struct sysinfo val;
+
+	if (argc)
+		return KDB_ARGCOUNT;
+
+	kdb_printf("sysname    %s\n", init_uts_ns.name.sysname);
+	kdb_printf("release    %s\n", init_uts_ns.name.release);
+	kdb_printf("version    %s\n", init_uts_ns.name.version);
+	kdb_printf("machine    %s\n", init_uts_ns.name.machine);
+	kdb_printf("nodename   %s\n", init_uts_ns.name.nodename);
+	kdb_printf("domainname %s\n", init_uts_ns.name.domainname);
+	kdb_printf("ccversion  %s\n", __stringify(CCVERSION));
+
+	kdb_gmtime(&xtime, &tm);
+	kdb_printf("date       %04d-%02d-%02d %02d:%02d:%02d "
+		   "tz_minuteswest %d\n",
+		1900+tm.tm_year, tm.tm_mon+1, tm.tm_mday,
+		tm.tm_hour, tm.tm_min, tm.tm_sec,
+		sys_tz.tz_minuteswest);
+
+	kdb_sysinfo(&val);
+	kdb_printf("uptime     ");
+	if (val.uptime > (24*60*60)) {
+		int days = val.uptime / (24*60*60);
+		val.uptime %= (24*60*60);
+		kdb_printf("%d day%s ", days, days == 1 ? "" : "s");
+	}
+	kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60);
+
+	/* lifted from fs/proc/proc_misc.c::loadavg_read_proc() */
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
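+	/* Worked example (illustrative): with FSHIFT == 11 a raw avenrun
+	 * value of 1130 prints as 0.55, since 1130 >> 11 == 0 and
+	 * ((1130 & 2047) * 100) >> 11 == 55.
+	 */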
+	kdb_printf("load avg   %ld.%02ld %ld.%02ld %ld.%02ld\n",
+		LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]),
+		LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]),
+		LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2]));
+#undef LOAD_INT
+#undef LOAD_FRAC
+	/* Display in kilobytes */
+#define K(x) ((x) << (PAGE_SHIFT - 10))
+	kdb_printf("\nMemTotal:       %8lu kB\nMemFree:        %8lu kB\n"
+		   "Buffers:        %8lu kB\n",
+		   val.totalram, val.freeram, val.bufferram);
+	return 0;
+}
+
+/*
+ * kdb_per_cpu - This function implements the 'per_cpu' command.
+ */
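+/*
+ * Example (illustrative; assumes a per-cpu symbol named 'runqueues'
+ * exists in the running kernel):
+ *   per_cpu runqueues 4 1    show 4 bytes of it for cpu 1 only
+ */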
+static int kdb_per_cpu(int argc, const char **argv)
+{
+	char buf[256], fmtstr[64];
+	kdb_symtab_t symtab;
+	cpumask_t suppress = CPU_MASK_NONE;
+	int cpu, diag;
+	unsigned long addr, val, bytesperword = 0, whichcpu = ~0UL;
+
+	if (argc < 1 || argc > 3)
+		return KDB_ARGCOUNT;
+
+	snprintf(buf, sizeof(buf), "per_cpu__%s", argv[1]);
+	if (!kdbgetsymval(buf, &symtab)) {
+		kdb_printf("%s is not a per_cpu variable\n", argv[1]);
+		return KDB_BADADDR;
+	}
+	if (argc >= 2) {
+		diag = kdbgetularg(argv[2], &bytesperword);
+		if (diag)
+			return diag;
+	}
+	if (!bytesperword)
+		bytesperword = KDB_WORD_SIZE;
+	else if (bytesperword > KDB_WORD_SIZE)
+		return KDB_BADWIDTH;
+	sprintf(fmtstr, "%%0%dlx ", (int)(2*bytesperword));
+	if (argc >= 3) {
+		diag = kdbgetularg(argv[3], &whichcpu);
+		if (diag)
+			return diag;
+		if (!cpu_online(whichcpu)) {
+			kdb_printf("cpu %ld is not online\n", whichcpu);
+			return KDB_BADCPUNUM;
+		}
+	}
+
+	/* Most architectures use __per_cpu_offset[cpu], some use
+	 * __per_cpu_offset(cpu), and non-SMP builds have no __per_cpu_offset.
+	 */
+#ifdef	__per_cpu_offset
+#define KDB_PCU(cpu) __per_cpu_offset(cpu)
+#else
+#ifdef	CONFIG_SMP
+#define KDB_PCU(cpu) __per_cpu_offset[cpu]
+#else
+#define KDB_PCU(cpu) 0
+#endif
+#endif
+
+	for_each_online_cpu(cpu) {
+		if (whichcpu != ~0UL && whichcpu != cpu)
+			continue;
+		addr = symtab.sym_start + KDB_PCU(cpu);
+		diag = kdb_getword(&val, addr, bytesperword);
+		if (diag) {
+			kdb_printf("%5d " kdb_bfd_vma_fmt0 " - unable to "
+				   "read, diag=%d\n", cpu, addr, diag);
+			continue;
+		}
+#ifdef	CONFIG_SMP
+		if (!val) {
+			cpu_set(cpu, suppress);
+			continue;
+		}
+#endif	/* CONFIG_SMP */
+		kdb_printf("%5d ", cpu);
+		kdb_md_line(fmtstr, addr,
+			bytesperword == KDB_WORD_SIZE,
+			1, bytesperword, 1, 1, 0);
+	}
+	if (cpus_weight(suppress) == 0)
+		return 0;
+	kdb_printf("Zero suppressed cpu(s):");
+	for (cpu = first_cpu(suppress); cpu < num_possible_cpus();
+	     cpu = next_cpu(cpu, suppress)) {
+		kdb_printf(" %d", cpu);
+		if (cpu == num_possible_cpus() - 1 ||
+		    next_cpu(cpu, suppress) != cpu + 1)
+			continue;
+		while (cpu < num_possible_cpus() &&
+		       next_cpu(cpu, suppress) == cpu + 1)
+			++cpu;
+		kdb_printf("-%d", cpu);
+	}
+	kdb_printf("\n");
+
+#undef KDB_PCU
+
+	return 0;
+}
+
+/*
+ * display help for the use of cmd | grep pattern
+ */
+static int kdb_grep_help(int argc, const char **argv)
+{
+	kdb_printf("Usage of  cmd args | grep pattern:\n");
+	kdb_printf("  Any command's output may be filtered through an ");
+	kdb_printf("emulated 'pipe'.\n");
+	kdb_printf("  'grep' is just a keyword.\n");
+	kdb_printf("  The pattern may include a very limited set of "
+		   "metacharacters:\n");
+	kdb_printf("   pattern or ^pattern or pattern$ or ^pattern$\n");
+	kdb_printf("  And if there are spaces in the pattern, you may "
+		   "quote it:\n");
+	kdb_printf("   \"pat tern\" or \"^pat tern\" or \"pat tern$\""
+		   " or \"^pat tern$\"\n");
+	return 0;
+}
+
+/*
+ * kdb_register_repeat - This function is used to register a kernel
+ * 	debugger command.
+ * Inputs:
+ *	cmd	Command name
+ *	func	Function to execute the command
+ *	usage	A simple usage string showing arguments
+ *	help	A simple help string describing command
+ *	minlen	Minimum number of characters required to match the command
+ *	repeat	Does the command auto repeat on enter?
+ * Returns:
+ *	zero for success, one if a duplicate command.
+ */
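+/*
+ * Example registration (illustrative; 'mycmd' and its handler are
+ * hypothetical, not part of kdb):
+ *   static int my_cmd(int argc, const char **argv);
+ *
+ *   kdb_register_repeat("mycmd", my_cmd, "<arg>",
+ *                       "One-line description", 0, KDB_REPEAT_NONE);
+ */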
+#define kdb_command_extend 50	/* arbitrary */
+int kdb_register_repeat(char *cmd,
+			kdb_func_t func,
+			char *usage,
+			char *help,
+			short minlen,
+			kdb_repeat_t repeat)
+{
+	int i;
+	kdbtab_t *kp;
+
+	/*
+	 *  Brute force method to determine duplicates
+	 */
+	for_each_kdbcmd(kp, i) {
+		if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
+			kdb_printf("Duplicate kdb command registered: "
+				"%s, func %p help %s\n", cmd, func, help);
+			return 1;
+		}
+	}
+
+	/*
+	 * Insert command into first available location in table
+	 */
+	for_each_kdbcmd(kp, i) {
+		if (kp->cmd_name == NULL)
+			break;
+	}
+
+	if (i >= kdb_max_commands) {
+		kdbtab_t *new = kmalloc((kdb_max_commands - KDB_BASE_CMD_MAX +
+			 kdb_command_extend) * sizeof(*new), GFP_KDB);
+		if (!new) {
+			kdb_printf("Could not allocate new kdb_command "
+				   "table\n");
+			return 1;
+		}
+		if (kdb_commands) {
+			memcpy(new, kdb_commands,
+			       kdb_max_commands * sizeof(*new));
+			kfree(kdb_commands);
+		}
+		memset(new + kdb_max_commands, 0,
+		       kdb_command_extend * sizeof(*new));
+		kdb_commands = new;
+		kp = kdb_commands + kdb_max_commands;
+		kdb_max_commands += kdb_command_extend;
+	}
+
+	kp->cmd_name   = cmd;
+	kp->cmd_func   = func;
+	kp->cmd_usage  = usage;
+	kp->cmd_help   = help;
+	kp->cmd_flags  = 0;
+	kp->cmd_minlen = minlen;
+	kp->cmd_repeat = repeat;
+
+	return 0;
+}
+
+/*
+ * kdb_register - Compatibility register function for commands that do
+ *	not need to specify a repeat state.  Equivalent to
+ *	kdb_register_repeat with KDB_REPEAT_NONE.
+ * Inputs:
+ *	cmd	Command name
+ *	func	Function to execute the command
+ *	usage	A simple usage string showing arguments
+ *	help	A simple help string describing command
+ *	minlen	Minimum number of characters required to match the command
+ * Returns:
+ *	zero for success, one if a duplicate command.
+ */
+int kdb_register(char *cmd,
+	     kdb_func_t func,
+	     char *usage,
+	     char *help,
+	     short minlen)
+{
+	return kdb_register_repeat(cmd, func, usage, help, minlen,
+				   KDB_REPEAT_NONE);
+}
+
+/*
+ * kdb_unregister - This function is used to unregister a kernel
+ *	debugger command.  It is generally called when a module which
+ *	implements kdb commands is unloaded.
+ * Inputs:
+ *	cmd	Command name
+ * Returns:
+ *	zero for success, one if the command was not registered.
+ */
+int kdb_unregister(char *cmd)
+{
+	int i;
+	kdbtab_t *kp;
+
+	/*
+	 *  find the command.
+	 */
+	for (i = 0, kp = kdb_commands; i < kdb_max_commands; i++, kp++) {
+		if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
+			kp->cmd_name = NULL;
+			return 0;
+		}
+	}
+
+	/* Couldn't find it.  */
+	return 1;
+}
+
+/* Initialize the kdb command table. */
+static void __init kdb_inittab(void)
+{
+	int i;
+	kdbtab_t *kp;
+
+	for_each_kdbcmd(kp, i)
+		kp->cmd_name = NULL;
+
+	kdb_register_repeat("md", kdb_md, "<vaddr>",
+	  "Display Memory Contents, also mdWcN, e.g. md8c1", 1,
+			    KDB_REPEAT_NO_ARGS);
+	kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>",
+	  "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
+	kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>",
+	  "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
+	kdb_register_repeat("mds", kdb_md, "<vaddr>",
+	  "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
+	kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>",
+	  "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
+	kdb_register_repeat("go", kdb_go, "[<vaddr>]",
+	  "Continue Execution", 1, KDB_REPEAT_NONE);
+	kdb_register_repeat("rd", kdb_rd, "",
+	  "Display Registers", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("rm", kdb_rm, "<reg> <contents>",
+	  "Modify Registers", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("ef", kdb_ef, "<vaddr>",
+	  "Display exception frame", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("bt", kdb_bt, "[<vaddr>]",
+	  "Stack traceback", 1, KDB_REPEAT_NONE);
+	kdb_register_repeat("btp", kdb_bt, "<pid>",
+	  "Display stack for process <pid>", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("bta", kdb_bt, "[DRSTCZEUIMA]",
+	  "Display stack all processes", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("btc", kdb_bt, "",
+	  "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("btt", kdb_bt, "<vaddr>",
+	  "Backtrace process given its struct task address", 0,
+			    KDB_REPEAT_NONE);
+	kdb_register_repeat("ll", kdb_ll, "<first-element> <linkoffset> <cmd>",
+	  "Execute cmd for each element in linked list", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("env", kdb_env, "",
+	  "Show environment variables", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("set", kdb_set, "",
+	  "Set environment variables", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("help", kdb_help, "",
+	  "Display Help Message", 1, KDB_REPEAT_NONE);
+	kdb_register_repeat("?", kdb_help, "",
+	  "Display Help Message", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("cpu", kdb_cpu, "<cpunum>",
+	  "Switch to new cpu", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("kgdb", kdb_kgdb, "",
+	  "Enter kgdb mode", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("ps", kdb_ps, "[<flags>|A]",
+	  "Display active task list", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("pid", kdb_pid, "<pidnum>",
+	  "Switch to another task", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("reboot", kdb_reboot, "",
+	  "Reboot the machine immediately", 0, KDB_REPEAT_NONE);
+#if defined(CONFIG_MODULES)
+	kdb_register_repeat("lsmod", kdb_lsmod, "",
+	  "List loaded kernel modules", 0, KDB_REPEAT_NONE);
+#endif
+#if defined(CONFIG_MAGIC_SYSRQ)
+	kdb_register_repeat("sr", kdb_sr, "<key>",
+	  "Magic SysRq key", 0, KDB_REPEAT_NONE);
+#endif
+#if defined(CONFIG_PRINTK)
+	kdb_register_repeat("dmesg", kdb_dmesg, "[lines]",
+	  "Display syslog buffer", 0, KDB_REPEAT_NONE);
+#endif
+	kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
+	  "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>",
+	  "Send a signal to a process", 0, KDB_REPEAT_NONE);
+	kdb_register_repeat("summary", kdb_summary, "",
+	  "Summarize the system", 4, KDB_REPEAT_NONE);
+	kdb_register_repeat("per_cpu", kdb_per_cpu, "",
+	  "Display per_cpu variables", 3, KDB_REPEAT_NONE);
+	kdb_register_repeat("grephelp", kdb_grep_help, "",
+	  "Display help on | grep", 0, KDB_REPEAT_NONE);
+}
+
+/* Execute any commands defined in kdb_cmds.  */
+static void __init kdb_cmd_init(void)
+{
+	int i, diag;
+	for (i = 0; kdb_cmds[i]; ++i) {
+		diag = kdb_parse(kdb_cmds[i]);
+		if (diag)
+			kdb_printf("kdb command %s failed, kdb diag %d\n",
+				kdb_cmds[i], diag);
+	}
+	if (defcmd_in_progress) {
+		kdb_printf("Incomplete 'defcmd' set, forcing endefcmd\n");
+		kdb_parse("endefcmd");
+	}
+}
+
+/* Initialize kdb_printf, breakpoint tables and kdb state */
+void __init kdb_init(int lvl)
+{
+	static int kdb_init_lvl = KDB_NOT_INITIALIZED;
+	int i;
+
+	if (kdb_init_lvl == KDB_INIT_FULL || lvl <= kdb_init_lvl)
+		return;
+	for (i = kdb_init_lvl; i < lvl; i++) {
+		switch (i) {
+		case KDB_NOT_INITIALIZED:
+			kdb_inittab();		/* Initialize Command Table */
+			kdb_initbptab();	/* Initialize Breakpoints */
+			break;
+		case KDB_INIT_EARLY:
+			kdb_cmd_init();		/* Build kdb_cmds tables */
+			break;
+		}
+	}
+	kdb_init_lvl = lvl;
+}
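
For orientation, a minimal sketch (not part of this patch) of how a loadable
module would use the registration interface added above; it assumes the usual
linux/kdb.h declarations and a hypothetical "hello" command:

	#include <linux/module.h>
	#include <linux/kdb.h>

	/* Hypothetical example command: prints a greeting from kdb */
	static int kdb_hello(int argc, const char **argv)
	{
		kdb_printf("hello from kdb, argc=%d\n", argc);
		return 0;	/* zero means the command succeeded */
	}

	static int __init kdb_hello_init(void)
	{
		/* usage string, help string, minlen=0: no abbreviation */
		if (kdb_register("hello", kdb_hello, "[<arg>]",
				 "Print a greeting", 0)) {
			pr_err("kdb_hello: command already registered\n");
			return -EEXIST;
		}
		return 0;
	}

	static void __exit kdb_hello_exit(void)
	{
		kdb_unregister("hello");
	}

	module_init(kdb_hello_init);
	module_exit(kdb_hello_exit);
	MODULE_LICENSE("GPL");

kdb_register() returns non-zero when the command name is already taken, so the
sketch maps that to -EEXIST at module load time.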
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
new file mode 100644
index 0000000..97d3ba6
--- /dev/null
+++ b/kernel/debug/kdb/kdb_private.h
@@ -0,0 +1,300 @@
+#ifndef _KDBPRIVATE_H
+#define _KDBPRIVATE_H
+
+/*
+ * Kernel Debugger Architecture Independent Private Headers
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ */
+
+#include <linux/kgdb.h>
+#include "../debug_core.h"
+
+/* Kernel Debugger Error codes.  Must not overlap with command codes. */
+#define KDB_NOTFOUND	(-1)
+#define KDB_ARGCOUNT	(-2)
+#define KDB_BADWIDTH	(-3)
+#define KDB_BADRADIX	(-4)
+#define KDB_NOTENV	(-5)
+#define KDB_NOENVVALUE	(-6)
+#define KDB_NOTIMP	(-7)
+#define KDB_ENVFULL	(-8)
+#define KDB_ENVBUFFULL	(-9)
+#define KDB_TOOMANYBPT	(-10)
+#define KDB_TOOMANYDBREGS (-11)
+#define KDB_DUPBPT	(-12)
+#define KDB_BPTNOTFOUND	(-13)
+#define KDB_BADMODE	(-14)
+#define KDB_BADINT	(-15)
+#define KDB_INVADDRFMT  (-16)
+#define KDB_BADREG      (-17)
+#define KDB_BADCPUNUM   (-18)
+#define KDB_BADLENGTH	(-19)
+#define KDB_NOBP	(-20)
+#define KDB_BADADDR	(-21)
+
+/* Kernel Debugger Command codes.  Must not overlap with error codes. */
+#define KDB_CMD_GO	(-1001)
+#define KDB_CMD_CPU	(-1002)
+#define KDB_CMD_SS	(-1003)
+#define KDB_CMD_SSB	(-1004)
+#define KDB_CMD_KGDB (-1005)
+#define KDB_CMD_KGDB2 (-1006)
+
+/* Internal debug flags */
+#define KDB_DEBUG_FLAG_BP	0x0002	/* Breakpoint subsystem debug */
+#define KDB_DEBUG_FLAG_BB_SUMM	0x0004	/* Basic block analysis, summary only */
+#define KDB_DEBUG_FLAG_AR	0x0008	/* Activation record, generic */
+#define KDB_DEBUG_FLAG_ARA	0x0010	/* Activation record, arch specific */
+#define KDB_DEBUG_FLAG_BB	0x0020	/* All basic block analysis */
+#define KDB_DEBUG_FLAG_STATE	0x0040	/* State flags */
+#define KDB_DEBUG_FLAG_MASK	0xffff	/* All debug flags */
+#define KDB_DEBUG_FLAG_SHIFT	16	/* Shift factor for dbflags */
+
+#define KDB_DEBUG(flag)	(kdb_flags & \
+	(KDB_DEBUG_FLAG_##flag << KDB_DEBUG_FLAG_SHIFT))
+#define KDB_DEBUG_STATE(text, value) if (KDB_DEBUG(STATE)) \
+		kdb_print_state(text, value)
+
+#if BITS_PER_LONG == 32
+
+#define KDB_PLATFORM_ENV	"BYTESPERWORD=4"
+
+#define kdb_machreg_fmt		"0x%lx"
+#define kdb_machreg_fmt0	"0x%08lx"
+#define kdb_bfd_vma_fmt		"0x%lx"
+#define kdb_bfd_vma_fmt0	"0x%08lx"
+#define kdb_elfw_addr_fmt	"0x%x"
+#define kdb_elfw_addr_fmt0	"0x%08x"
+#define kdb_f_count_fmt		"%d"
+
+#elif BITS_PER_LONG == 64
+
+#define KDB_PLATFORM_ENV	"BYTESPERWORD=8"
+
+#define kdb_machreg_fmt		"0x%lx"
+#define kdb_machreg_fmt0	"0x%016lx"
+#define kdb_bfd_vma_fmt		"0x%lx"
+#define kdb_bfd_vma_fmt0	"0x%016lx"
+#define kdb_elfw_addr_fmt	"0x%x"
+#define kdb_elfw_addr_fmt0	"0x%016x"
+#define kdb_f_count_fmt		"%ld"
+
+#endif
+
+/*
+ * KDB_MAXBPT describes the total number of breakpoints
+ * supported by this architecure.
+ */
+#define KDB_MAXBPT	16
+
+/* Maximum number of arguments to a function  */
+#define KDB_MAXARGS    16
+
+typedef enum {
+	KDB_REPEAT_NONE = 0,	/* Do not repeat this command */
+	KDB_REPEAT_NO_ARGS,	/* Repeat the command without arguments */
+	KDB_REPEAT_WITH_ARGS,	/* Repeat the command including its arguments */
+} kdb_repeat_t;
+
+typedef int (*kdb_func_t)(int, const char **);
+
+/* Symbol table format returned by kallsyms. */
+typedef struct __ksymtab {
+		unsigned long value;	/* Address of symbol */
+		const char *mod_name;	/* Module containing symbol or
+					 * "kernel" */
+		unsigned long mod_start;
+		unsigned long mod_end;
+		const char *sec_name;	/* Section containing symbol */
+		unsigned long sec_start;
+		unsigned long sec_end;
+		const char *sym_name;	/* Full symbol name, including
+					 * any version */
+		unsigned long sym_start;
+		unsigned long sym_end;
+		} kdb_symtab_t;
+extern int kallsyms_symbol_next(char *prefix_name, int flag);
+extern int kallsyms_symbol_complete(char *prefix_name, int max_len);
+
+/* Exported Symbols for kernel loadable modules to use. */
+extern int kdb_register(char *, kdb_func_t, char *, char *, short);
+extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
+			       short, kdb_repeat_t);
+extern int kdb_unregister(char *);
+
+extern int kdb_getarea_size(void *, unsigned long, size_t);
+extern int kdb_putarea_size(unsigned long, void *, size_t);
+
+/*
+ * Like get_user and put_user, kdb_getarea and kdb_putarea take variable
+ * names, not pointers.  The underlying *_size functions take pointers.
+ */
+#define kdb_getarea(x, addr) kdb_getarea_size(&(x), addr, sizeof((x)))
+#define kdb_putarea(addr, x) kdb_putarea_size(addr, &(x), sizeof((x)))
+
+extern int kdb_getphysword(unsigned long *word,
+			unsigned long addr, size_t size);
+extern int kdb_getword(unsigned long *, unsigned long, size_t);
+extern int kdb_putword(unsigned long, unsigned long, size_t);
+
+extern int kdbgetularg(const char *, unsigned long *);
+extern int kdb_set(int, const char **);
+extern char *kdbgetenv(const char *);
+extern int kdbgetintenv(const char *, int *);
+extern int kdbgetaddrarg(int, const char **, int*, unsigned long *,
+			 long *, char **);
+extern int kdbgetsymval(const char *, kdb_symtab_t *);
+extern int kdbnearsym(unsigned long, kdb_symtab_t *);
+extern void kdbnearsym_cleanup(void);
+extern char *kdb_strdup(const char *str, gfp_t type);
+extern void kdb_symbol_print(unsigned long, const kdb_symtab_t *, unsigned int);
+
+/* Routine for debugging the debugger state. */
+extern void kdb_print_state(const char *, int);
+
+extern int kdb_state;
+#define KDB_STATE_KDB		0x00000001	/* Cpu is inside kdb */
+#define KDB_STATE_LEAVING	0x00000002	/* Cpu is leaving kdb */
+#define KDB_STATE_CMD		0x00000004	/* Running a kdb command */
+#define KDB_STATE_KDB_CONTROL	0x00000008	/* This cpu is under
+						 * kdb control */
+#define KDB_STATE_HOLD_CPU	0x00000010	/* Hold this cpu inside kdb */
+#define KDB_STATE_DOING_SS	0x00000020	/* Doing ss command */
+#define KDB_STATE_DOING_SSB	0x00000040	/* Doing ssb command,
+						 * DOING_SS is also set */
+#define KDB_STATE_SSBPT		0x00000080	/* Install breakpoint
+						 * after one ss, independent of
+						 * DOING_SS */
+#define KDB_STATE_REENTRY	0x00000100	/* Valid re-entry into kdb */
+#define KDB_STATE_SUPPRESS	0x00000200	/* Suppress error messages */
+#define KDB_STATE_PAGER		0x00000400	/* pager is available */
+#define KDB_STATE_GO_SWITCH	0x00000800	/* go is switching
+						 * back to initial cpu */
+#define KDB_STATE_PRINTF_LOCK	0x00001000	/* Holds kdb_printf lock */
+#define KDB_STATE_WAIT_IPI	0x00002000	/* Waiting for kdb_ipi() NMI */
+#define KDB_STATE_RECURSE	0x00004000	/* Recursive entry to kdb */
+#define KDB_STATE_IP_ADJUSTED	0x00008000	/* Restart IP has been
+						 * adjusted */
+#define KDB_STATE_GO1		0x00010000	/* go only releases one cpu */
+#define KDB_STATE_KEYBOARD	0x00020000	/* kdb entered via
+						 * keyboard on this cpu */
+#define KDB_STATE_KEXEC		0x00040000	/* kexec issued */
+#define KDB_STATE_DOING_KGDB	0x00080000	/* kgdb enter now issued */
+#define KDB_STATE_DOING_KGDB2	0x00100000	/* kgdb enter now issued */
+#define KDB_STATE_KGDB_TRANS	0x00200000	/* Transition to kgdb */
+#define KDB_STATE_ARCH		0xff000000	/* Reserved for arch
+						 * specific use */
+
+#define KDB_STATE(flag) (kdb_state & KDB_STATE_##flag)
+#define KDB_STATE_SET(flag) ((void)(kdb_state |= KDB_STATE_##flag))
+#define KDB_STATE_CLEAR(flag) ((void)(kdb_state &= ~KDB_STATE_##flag))
+
+extern int kdb_nextline; /* Current number of lines displayed */
+
+typedef struct _kdb_bp {
+	unsigned long	bp_addr;	/* Address breakpoint is present at */
+	unsigned int	bp_free:1;	/* This entry is available */
+	unsigned int	bp_enabled:1;	/* Breakpoint is active in register */
+	unsigned int	bp_type:4;	/* Uses hardware register */
+	unsigned int	bp_installed:1;	/* Breakpoint is installed */
+	unsigned int	bp_delay:1;	/* Do delayed bp handling */
+	unsigned int	bp_delayed:1;	/* Delayed breakpoint */
+	unsigned int	bph_length;	/* HW break length */
+} kdb_bp_t;
+
+#ifdef CONFIG_KGDB_KDB
+extern kdb_bp_t kdb_breakpoints[/* KDB_MAXBPT */];
+
+/* The KDB shell command table */
+typedef struct _kdbtab {
+	char    *cmd_name;		/* Command name */
+	kdb_func_t cmd_func;		/* Function to execute command */
+	char    *cmd_usage;		/* Usage String for this command */
+	char    *cmd_help;		/* Help message for this command */
+	short    cmd_flags;		/* Parsing flags */
+	short    cmd_minlen;		/* Minimum legal # command
+					 * chars required */
+	kdb_repeat_t cmd_repeat;	/* Does command auto repeat on enter? */
+} kdbtab_t;
+
+extern int kdb_bt(int, const char **);	/* KDB display back trace */
+
+/* KDB breakpoint management functions */
+extern void kdb_initbptab(void);
+extern void kdb_bp_install(struct pt_regs *);
+extern void kdb_bp_remove(void);
+
+typedef enum {
+	KDB_DB_BPT,	/* Breakpoint */
+	KDB_DB_SS,	/* Single-step trap */
+	KDB_DB_SSB,	/* Single step to branch */
+	KDB_DB_SSBPT,	/* Single step over breakpoint */
+	KDB_DB_NOBPT	/* Spurious breakpoint */
+} kdb_dbtrap_t;
+
+extern int kdb_main_loop(kdb_reason_t, kdb_reason_t,
+			 int, kdb_dbtrap_t, struct pt_regs *);
+
+/* Miscellaneous functions and data areas */
+extern int kdb_grepping_flag;
+extern char kdb_grep_string[];
+extern int kdb_grep_leading;
+extern int kdb_grep_trailing;
+extern char *kdb_cmds[];
+extern void kdb_syslog_data(char *syslog_data[]);
+extern unsigned long kdb_task_state_string(const char *);
+extern char kdb_task_state_char (const struct task_struct *);
+extern unsigned long kdb_task_state(const struct task_struct *p,
+				    unsigned long mask);
+extern void kdb_ps_suppressed(void);
+extern void kdb_ps1(const struct task_struct *p);
+extern void kdb_print_nameval(const char *name, unsigned long val);
+extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
+extern void kdb_meminfo_proc_show(void);
+extern const char *kdb_walk_kallsyms(loff_t *pos);
+extern char *kdb_getstr(char *, size_t, char *);
+
+/* Defines for kdb_symbol_print */
+#define KDB_SP_SPACEB	0x0001		/* Space before string */
+#define KDB_SP_SPACEA	0x0002		/* Space after string */
+#define KDB_SP_PAREN	0x0004		/* Parenthesis around string */
+#define KDB_SP_VALUE	0x0008		/* Print the value of the address */
+#define KDB_SP_SYMSIZE	0x0010		/* Print the size of the symbol */
+#define KDB_SP_NEWLINE	0x0020		/* Newline after string */
+#define KDB_SP_DEFAULT (KDB_SP_VALUE|KDB_SP_PAREN)
+
+#define KDB_TSK(cpu) kgdb_info[cpu].task
+#define KDB_TSKREGS(cpu) kgdb_info[cpu].debuggerinfo
+
+extern struct task_struct *kdb_curr_task(int);
+
+#define kdb_task_has_cpu(p) (task_curr(p))
+
+/* Simplify coexistence with NPTL */
+#define	kdb_do_each_thread(g, p) do_each_thread(g, p)
+#define	kdb_while_each_thread(g, p) while_each_thread(g, p)
+
+#define GFP_KDB (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
+
+extern void *debug_kmalloc(size_t size, gfp_t flags);
+extern void debug_kfree(void *);
+extern void debug_kusage(void);
+
+extern void kdb_set_current_task(struct task_struct *);
+extern struct task_struct *kdb_current_task;
+#ifdef CONFIG_MODULES
+extern struct list_head *kdb_modules;
+#endif /* CONFIG_MODULES */
+
+extern char kdb_prompt_str[];
+
+#define	KDB_WORD_SIZE	((int)sizeof(unsigned long))
+
+#endif /* CONFIG_KGDB_KDB */
+#endif	/* !_KDBPRIVATE_H */
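
A hedged usage sketch (hypothetical kdb_peek helper, not in the patch) showing
the intended contrast between kdb_getarea(), which takes a destination
variable name, and kdb_getword(), which takes an explicit width:

	/* Illustrative only: read one byte and one word from a kernel
	 * address inside a kdb command, using the accessors above. */
	static int kdb_peek(unsigned long addr)
	{
		unsigned long val;
		unsigned char byte;
		int diag;

		/* kdb_getarea() takes the destination *variable*, not a
		 * pointer; the size is derived from sizeof(byte). */
		diag = kdb_getarea(byte, addr);
		if (diag)
			return diag;		/* e.g. KDB_BADADDR */

		/* kdb_getword() reads a value of an explicit width */
		diag = kdb_getword(&val, addr, sizeof(val));
		if (!diag)
			kdb_printf(kdb_machreg_fmt0 ": %02x, "
				   kdb_machreg_fmt "\n", addr, byte, val);
		return diag;
	}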
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
new file mode 100644
index 0000000..45344d5c
--- /dev/null
+++ b/kernel/debug/kdb/kdb_support.c
@@ -0,0 +1,927 @@
+/*
+ * Kernel Debugger Architecture Independent Support Functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ * 03/02/13    added new 2.5 kallsyms <xavier.bru@bull.net>
+ */
+
+#include <stdarg.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kallsyms.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/hardirq.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/kdb.h>
+#include <linux/slab.h>
+#include "kdb_private.h"
+
+/*
+ * kdbgetsymval - Return the address of the given symbol.
+ *
+ * Parameters:
+ *	symname	Character string containing symbol name
+ *      symtab  Structure to receive results
+ * Returns:
+ *	0	Symbol not found, symtab zero filled
+ *	1	Symbol mapped to module/symbol/section, data in symtab
+ */
+int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
+{
+	if (KDB_DEBUG(AR))
+		kdb_printf("kdbgetsymval: symname=%s, symtab=%p\n", symname,
+			   symtab);
+	memset(symtab, 0, sizeof(*symtab));
+	symtab->sym_start = kallsyms_lookup_name(symname);
+	if (symtab->sym_start) {
+		if (KDB_DEBUG(AR))
+			kdb_printf("kdbgetsymval: returns 1, "
+				   "symtab->sym_start=0x%lx\n",
+				   symtab->sym_start);
+		return 1;
+	}
+	if (KDB_DEBUG(AR))
+		kdb_printf("kdbgetsymval: returns 0\n");
+	return 0;
+}
+EXPORT_SYMBOL(kdbgetsymval);
+
+static char *kdb_name_table[100];	/* arbitrary size */
+
+/*
+ * kdbnearsym -	Return the name of the symbol with the nearest address
+ *	less than 'addr'.
+ *
+ * Parameters:
+ *	addr	Address to check for symbol near
+ *	symtab  Structure to receive results
+ * Returns:
+ *	0	No sections contain this address, symtab zero filled
+ *	1	Address mapped to module/symbol/section, data in symtab
+ * Remarks:
+ *	2.6 kallsyms has a "feature" where it unpacks the name into a
+ *	string.  If that string is reused before the caller expects it
+ *	then the caller sees its string change without warning.  To
+ *	avoid cluttering up the main kdb code with lots of kdb_strdup,
+ *	tests and kfree calls, kdbnearsym maintains an LRU list of the
+ *	last few unique strings.  The list is sized large enough to
+ *	hold active strings, no kdb caller of kdbnearsym makes more
+ *	than ~20 later calls before using a saved value.
+ */
+int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
+{
+	int ret = 0;
+	unsigned long symbolsize;
+	unsigned long offset;
+#define knt1_size 128		/* must be >= kallsyms table size */
+	char *knt1 = NULL;
+
+	if (KDB_DEBUG(AR))
+		kdb_printf("kdbnearsym: addr=0x%lx, symtab=%p\n", addr, symtab);
+	memset(symtab, 0, sizeof(*symtab));
+
+	if (addr < 4096)
+		goto out;
+	knt1 = debug_kmalloc(knt1_size, GFP_ATOMIC);
+	if (!knt1) {
+		kdb_printf("kdbnearsym: addr=0x%lx cannot kmalloc knt1\n",
+			   addr);
+		goto out;
+	}
+	symtab->sym_name = kallsyms_lookup(addr, &symbolsize, &offset,
+				(char **)(&symtab->mod_name), knt1);
+	if (offset > 8*1024*1024) {
+		symtab->sym_name = NULL;
+		addr = offset = symbolsize = 0;
+	}
+	symtab->sym_start = addr - offset;
+	symtab->sym_end = symtab->sym_start + symbolsize;
+	ret = symtab->sym_name != NULL && *(symtab->sym_name) != '\0';
+
+	if (ret) {
+		int i;
+		/* Another 2.6 kallsyms "feature".  Sometimes the sym_name is
+		 * set but the buffer passed into kallsyms_lookup is not used,
+		 * so it contains garbage.  The caller has to work out which
+		 * buffer needs to be saved.
+		 *
+		 * What was Rusty smoking when he wrote that code?
+		 */
+		if (symtab->sym_name != knt1) {
+			strncpy(knt1, symtab->sym_name, knt1_size);
+			knt1[knt1_size-1] = '\0';
+		}
+		for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
+			if (kdb_name_table[i] &&
+			    strcmp(kdb_name_table[i], knt1) == 0)
+				break;
+		}
+		if (i >= ARRAY_SIZE(kdb_name_table)) {
+			debug_kfree(kdb_name_table[0]);
+			memcpy(kdb_name_table, kdb_name_table+1,
+			       sizeof(kdb_name_table[0]) *
+			       (ARRAY_SIZE(kdb_name_table)-1));
+		} else {
+			debug_kfree(knt1);
+			knt1 = kdb_name_table[i];
+			memcpy(kdb_name_table+i, kdb_name_table+i+1,
+			       sizeof(kdb_name_table[0]) *
+			       (ARRAY_SIZE(kdb_name_table)-i-1));
+		}
+		i = ARRAY_SIZE(kdb_name_table) - 1;
+		kdb_name_table[i] = knt1;
+		symtab->sym_name = kdb_name_table[i];
+		knt1 = NULL;
+	}
+
+	if (symtab->mod_name == NULL)
+		symtab->mod_name = "kernel";
+	if (KDB_DEBUG(AR))
+		kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, "
+		   "symtab->mod_name=%p, symtab->sym_name=%p (%s)\n", ret,
+		   symtab->sym_start, symtab->mod_name, symtab->sym_name,
+		   symtab->sym_name);
+
+out:
+	debug_kfree(knt1);
+	return ret;
+}
+
+void kdbnearsym_cleanup(void)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
+		if (kdb_name_table[i]) {
+			debug_kfree(kdb_name_table[i]);
+			kdb_name_table[i] = NULL;
+		}
+	}
+}
+
+static char ks_namebuf[KSYM_NAME_LEN+1], ks_namebuf_prev[KSYM_NAME_LEN+1];
+
+/*
+ * kallsyms_symbol_complete
+ *
+ * Parameters:
+ *	prefix_name	prefix of a symbol name to lookup
+ *	max_len		maximum length that can be returned
+ * Returns:
+ *	Number of symbols which match the given prefix.
+ * Notes:
+ *	prefix_name is changed to contain the longest unique prefix that
+ *	starts with this prefix (tab completion).
+ */
+int kallsyms_symbol_complete(char *prefix_name, int max_len)
+{
+	loff_t pos = 0;
+	int prefix_len = strlen(prefix_name), prev_len = 0;
+	int i, number = 0;
+	const char *name;
+
+	while ((name = kdb_walk_kallsyms(&pos))) {
+		if (strncmp(name, prefix_name, prefix_len) == 0) {
+			strcpy(ks_namebuf, name);
+			/* Work out the longest name that matches the prefix */
+			if (++number == 1) {
+				prev_len = min_t(int, max_len-1,
+						 strlen(ks_namebuf));
+				memcpy(ks_namebuf_prev, ks_namebuf, prev_len);
+				ks_namebuf_prev[prev_len] = '\0';
+				continue;
+			}
+			for (i = 0; i < prev_len; i++) {
+				if (ks_namebuf[i] != ks_namebuf_prev[i]) {
+					prev_len = i;
+					ks_namebuf_prev[i] = '\0';
+					break;
+				}
+			}
+		}
+	}
+	if (prev_len > prefix_len)
+		memcpy(prefix_name, ks_namebuf_prev, prev_len+1);
+	return number;
+}
+
+/*
+ * kallsyms_symbol_next
+ *
+ * Parameters:
+ *	prefix_name	prefix of a symbol name to lookup
+ *	flag	0 means search from the head, 1 means continue search.
+ * Returns:
+ *	1 if a symbol matches the given prefix.
+ *	0 if no string found
+ */
+int kallsyms_symbol_next(char *prefix_name, int flag)
+{
+	int prefix_len = strlen(prefix_name);
+	static loff_t pos;
+	const char *name;
+
+	if (!flag)
+		pos = 0;
+
+	while ((name = kdb_walk_kallsyms(&pos))) {
+		if (strncmp(name, prefix_name, prefix_len) == 0) {
+			strncpy(prefix_name, name, strlen(name)+1);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * kdb_symbol_print - Standard method for printing a symbol name and offset.
+ * Inputs:
+ *	addr	Address to be printed.
+ *	symtab	Address of symbol data, if NULL this routine does its
+ *		own lookup.
+ *	punc	Punctuation for string, bit field.
+ * Remarks:
+ *	The string and its punctuation are only printed if the address
+ *	is inside the kernel, except that the value is always printed
+ *	when requested.
+ */
+void kdb_symbol_print(unsigned long addr, const kdb_symtab_t *symtab_p,
+		      unsigned int punc)
+{
+	kdb_symtab_t symtab, *symtab_p2;
+	if (symtab_p) {
+		symtab_p2 = (kdb_symtab_t *)symtab_p;
+	} else {
+		symtab_p2 = &symtab;
+		kdbnearsym(addr, symtab_p2);
+	}
+	if (!(symtab_p2->sym_name || (punc & KDB_SP_VALUE)))
+		return;
+	if (punc & KDB_SP_SPACEB)
+		kdb_printf(" ");
+	if (punc & KDB_SP_VALUE)
+		kdb_printf(kdb_machreg_fmt0, addr);
+	if (symtab_p2->sym_name) {
+		if (punc & KDB_SP_VALUE)
+			kdb_printf(" ");
+		if (punc & KDB_SP_PAREN)
+			kdb_printf("(");
+		if (strcmp(symtab_p2->mod_name, "kernel"))
+			kdb_printf("[%s]", symtab_p2->mod_name);
+		kdb_printf("%s", symtab_p2->sym_name);
+		if (addr != symtab_p2->sym_start)
+			kdb_printf("+0x%lx", addr - symtab_p2->sym_start);
+		if (punc & KDB_SP_SYMSIZE)
+			kdb_printf("/0x%lx",
+				   symtab_p2->sym_end - symtab_p2->sym_start);
+		if (punc & KDB_SP_PAREN)
+			kdb_printf(")");
+	}
+	if (punc & KDB_SP_SPACEA)
+		kdb_printf(" ");
+	if (punc & KDB_SP_NEWLINE)
+		kdb_printf("\n");
+}
+
+/*
+ * kdb_strdup - kdb equivalent of strdup, for disasm code.
+ * Inputs:
+ *	str	The string to duplicate.
+ *	type	Flags to kmalloc for the new string.
+ * Returns:
+ *	Address of the new string, NULL if storage could not be allocated.
+ * Remarks:
+ *	This is not in lib/string.c because it uses kmalloc which is not
+ *	available when string.o is used in boot loaders.
+ */
+char *kdb_strdup(const char *str, gfp_t type)
+{
+	int n = strlen(str)+1;
+	char *s = kmalloc(n, type);
+	if (!s)
+		return NULL;
+	return strcpy(s, str);
+}
+
+/*
+ * kdb_getarea_size - Read an area of data.  The kdb equivalent of
+ *	copy_from_user, with kdb messages for invalid addresses.
+ * Inputs:
+ *	res	Pointer to the area to receive the result.
+ *	addr	Address of the area to copy.
+ *	size	Size of the area.
+ * Returns:
+ *	0 for success, < 0 for error.
+ */
+int kdb_getarea_size(void *res, unsigned long addr, size_t size)
+{
+	int ret = probe_kernel_read((char *)res, (char *)addr, size);
+	if (ret) {
+		if (!KDB_STATE(SUPPRESS)) {
+			kdb_printf("kdb_getarea: Bad address 0x%lx\n", addr);
+			KDB_STATE_SET(SUPPRESS);
+		}
+		ret = KDB_BADADDR;
+	} else {
+		KDB_STATE_CLEAR(SUPPRESS);
+	}
+	return ret;
+}
+
+/*
+ * kdb_putarea_size - Write an area of data.  The kdb equivalent of
+ *	copy_to_user, with kdb messages for invalid addresses.
+ * Inputs:
+ *	addr	Address of the area to write to.
+ *	res	Pointer to the area holding the data.
+ *	size	Size of the area.
+ * Returns:
+ *	0 for success, < 0 for error.
+ */
+int kdb_putarea_size(unsigned long addr, void *res, size_t size)
+{
+	int ret = probe_kernel_read((char *)addr, (char *)res, size);
+	if (ret) {
+		if (!KDB_STATE(SUPPRESS)) {
+			kdb_printf("kdb_putarea: Bad address 0x%lx\n", addr);
+			KDB_STATE_SET(SUPPRESS);
+		}
+		ret = KDB_BADADDR;
+	} else {
+		KDB_STATE_CLEAR(SUPPRESS);
+	}
+	return ret;
+}
+
+/*
+ * kdb_getphys - Read data from a physical address. Validates that the
+ * 	address is in range and uses kmap_atomic() to read the data;
+ * 	similar to kdb_getarea() but for physical addresses.
+ * Inputs:
+ * 	res	Pointer to the word to receive the result
+ * 	addr	Physical address of the area to copy
+ * 	size	Size of the area
+ * Returns:
+ *	0 for success, < 0 for error.
+ */
+static int kdb_getphys(void *res, unsigned long addr, size_t size)
+{
+	unsigned long pfn;
+	void *vaddr;
+	struct page *page;
+
+	pfn = (addr >> PAGE_SHIFT);
+	if (!pfn_valid(pfn))
+		return 1;
+	page = pfn_to_page(pfn);
+	vaddr = kmap_atomic(page, KM_KDB);
+	memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size);
+	kunmap_atomic(vaddr, KM_KDB);
+
+	return 0;
+}
+
+/*
+ * kdb_getphysword
+ * Inputs:
+ *	word	Pointer to the word to receive the result.
+ *	addr	Address of the area to copy.
+ *	size	Size of the area.
+ * Returns:
+ *	0 for success, < 0 for error.
+ */
+int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)
+{
+	int diag;
+	__u8  w1;
+	__u16 w2;
+	__u32 w4;
+	__u64 w8;
+	*word = 0;	/* Default value if addr or size is invalid */
+
+	switch (size) {
+	case 1:
+		diag = kdb_getphys(&w1, addr, sizeof(w1));
+		if (!diag)
+			*word = w1;
+		break;
+	case 2:
+		diag = kdb_getphys(&w2, addr, sizeof(w2));
+		if (!diag)
+			*word = w2;
+		break;
+	case 4:
+		diag = kdb_getphys(&w4, addr, sizeof(w4));
+		if (!diag)
+			*word = w4;
+		break;
+	case 8:
+		if (size <= sizeof(*word)) {
+			diag = kdb_getphys(&w8, addr, sizeof(w8));
+			if (!diag)
+				*word = w8;
+			break;
+		}
+		/* drop through */
+	default:
+		diag = KDB_BADWIDTH;
+		kdb_printf("kdb_getphysword: bad width %ld\n", (long) size);
+	}
+	return diag;
+}
+
+/*
+ * kdb_getword - Read a binary value.  Unlike kdb_getarea, this treats
+ *	data as numbers.
+ * Inputs:
+ *	word	Pointer to the word to receive the result.
+ *	addr	Address of the area to copy.
+ *	size	Size of the area.
+ * Returns:
+ *	0 for success, < 0 for error.
+ */
+int kdb_getword(unsigned long *word, unsigned long addr, size_t size)
+{
+	int diag;
+	__u8  w1;
+	__u16 w2;
+	__u32 w4;
+	__u64 w8;
+	*word = 0;	/* Default value if addr or size is invalid */
+	switch (size) {
+	case 1:
+		diag = kdb_getarea(w1, addr);
+		if (!diag)
+			*word = w1;
+		break;
+	case 2:
+		diag = kdb_getarea(w2, addr);
+		if (!diag)
+			*word = w2;
+		break;
+	case 4:
+		diag = kdb_getarea(w4, addr);
+		if (!diag)
+			*word = w4;
+		break;
+	case 8:
+		if (size <= sizeof(*word)) {
+			diag = kdb_getarea(w8, addr);
+			if (!diag)
+				*word = w8;
+			break;
+		}
+		/* drop through */
+	default:
+		diag = KDB_BADWIDTH;
+		kdb_printf("kdb_getword: bad width %ld\n", (long) size);
+	}
+	return diag;
+}
+
+/*
+ * kdb_putword - Write a binary value.  Unlike kdb_putarea, this
+ *	treats data as numbers.
+ * Inputs:
+ *	addr	Address of the area to write to.
+ *	word	The value to set.
+ *	size	Size of the area.
+ * Returns:
+ *	0 for success, < 0 for error.
+ */
+int kdb_putword(unsigned long addr, unsigned long word, size_t size)
+{
+	int diag;
+	__u8  w1;
+	__u16 w2;
+	__u32 w4;
+	__u64 w8;
+	switch (size) {
+	case 1:
+		w1 = word;
+		diag = kdb_putarea(addr, w1);
+		break;
+	case 2:
+		w2 = word;
+		diag = kdb_putarea(addr, w2);
+		break;
+	case 4:
+		w4 = word;
+		diag = kdb_putarea(addr, w4);
+		break;
+	case 8:
+		if (size <= sizeof(word)) {
+			w8 = word;
+			diag = kdb_putarea(addr, w8);
+			break;
+		}
+		/* drop through */
+	default:
+		diag = KDB_BADWIDTH;
+		kdb_printf("kdb_putword: bad width %ld\n", (long) size);
+	}
+	return diag;
+}
+
+/*
+ * kdb_task_state_string - Convert a string containing any of the
+ *	letters DRSTCZEUIMA to a mask for the process state field and
+ *	return the value.  If no argument is supplied, return the mask
+ *	that corresponds to environment variable PS, DRSTCZEU by
+ *	default.
+ * Inputs:
+ *	s	String to convert
+ * Returns:
+ *	Mask for process state.
+ * Notes:
+ *	The mask folds data from several sources into a single long value, so
+ *	be careful not to overlap the bits.  TASK_* bits are in the LSB,
+ *	special cases like UNRUNNABLE are in the MSB.  As of 2.6.10-rc1 there
+ *	is no overlap between TASK_* and EXIT_* but that may not always be
+ *	true, so EXIT_* bits are shifted left 16 bits before being stored in
+ *	the mask.
+ */
+
+/* unrunnable is < 0 */
+#define UNRUNNABLE	(1UL << (8*sizeof(unsigned long) - 1))
+#define RUNNING		(1UL << (8*sizeof(unsigned long) - 2))
+#define IDLE		(1UL << (8*sizeof(unsigned long) - 3))
+#define DAEMON		(1UL << (8*sizeof(unsigned long) - 4))
+
+unsigned long kdb_task_state_string(const char *s)
+{
+	long res = 0;
+	if (!s) {
+		s = kdbgetenv("PS");
+		if (!s)
+			s = "DRSTCZEU";	/* default value for ps */
+	}
+	while (*s) {
+		switch (*s) {
+		case 'D':
+			res |= TASK_UNINTERRUPTIBLE;
+			break;
+		case 'R':
+			res |= RUNNING;
+			break;
+		case 'S':
+			res |= TASK_INTERRUPTIBLE;
+			break;
+		case 'T':
+			res |= TASK_STOPPED;
+			break;
+		case 'C':
+			res |= TASK_TRACED;
+			break;
+		case 'Z':
+			res |= EXIT_ZOMBIE << 16;
+			break;
+		case 'E':
+			res |= EXIT_DEAD << 16;
+			break;
+		case 'U':
+			res |= UNRUNNABLE;
+			break;
+		case 'I':
+			res |= IDLE;
+			break;
+		case 'M':
+			res |= DAEMON;
+			break;
+		case 'A':
+			res = ~0UL;
+			break;
+		default:
+			  kdb_printf("%s: unknown flag '%c' ignored\n",
+				     __func__, *s);
+			  break;
+		}
+		++s;
+	}
+	return res;
+}
+
+/*
+ * kdb_task_state_char - Return the character that represents the task state.
+ * Inputs:
+ *	p	struct task for the process
+ * Returns:
+ *	One character to represent the task state.
+ */
+char kdb_task_state_char (const struct task_struct *p)
+{
+	int cpu;
+	char state;
+	unsigned long tmp;
+
+	if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long)))
+		return 'E';
+
+	cpu = kdb_process_cpu(p);
+	state = (p->state == 0) ? 'R' :
+		(p->state < 0) ? 'U' :
+		(p->state & TASK_UNINTERRUPTIBLE) ? 'D' :
+		(p->state & TASK_STOPPED) ? 'T' :
+		(p->state & TASK_TRACED) ? 'C' :
+		(p->exit_state & EXIT_ZOMBIE) ? 'Z' :
+		(p->exit_state & EXIT_DEAD) ? 'E' :
+		(p->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
+	if (p->pid == 0) {
+		/* Idle task.  Is it really idle, apart from the kdb
+		 * interrupt? */
+		if (!kdb_task_has_cpu(p) || kgdb_info[cpu].irq_depth == 1) {
+			if (cpu != kdb_initial_cpu)
+				state = 'I';	/* idle task */
+		}
+	} else if (!p->mm && state == 'S') {
+		state = 'M';	/* sleeping system daemon */
+	}
+	return state;
+}
+
+/*
+ * kdb_task_state - Return true if a process has the desired state
+ *	given by the mask.
+ * Inputs:
+ *	p	struct task for the process
+ *	mask	mask from kdb_task_state_string to select processes
+ * Returns:
+ *	True if the process matches at least one criteria defined by the mask.
+ */
+unsigned long kdb_task_state(const struct task_struct *p, unsigned long mask)
+{
+	char state[] = { kdb_task_state_char(p), '\0' };
+	return (mask & kdb_task_state_string(state)) != 0;
+}
+
+/*
+ * kdb_print_nameval - Print a name and its value, converting the
+ *	value to a symbol lookup if possible.
+ * Inputs:
+ *	name	field name to print
+ *	val	value of field
+ */
+void kdb_print_nameval(const char *name, unsigned long val)
+{
+	kdb_symtab_t symtab;
+	kdb_printf("  %-11.11s ", name);
+	if (kdbnearsym(val, &symtab))
+		kdb_symbol_print(val, &symtab,
+				 KDB_SP_VALUE|KDB_SP_SYMSIZE|KDB_SP_NEWLINE);
+	else
+		kdb_printf("0x%lx\n", val);
+}
+
+/* Last-ditch allocator for debugging, so we can still debug even when
+ * the GFP_ATOMIC pool has been exhausted.  The algorithms are tuned
+ * for space usage, not for speed.  There is one smallish memory pool;
+ * the free chain is kept in ascending address order to allow
+ * coalescing, and allocations are done by brute-force best fit.
+ */
+
+struct debug_alloc_header {
+	u32 next;	/* offset of next header from start of pool */
+	u32 size;
+	void *caller;
+};
+
+/* The memory returned by this allocator must be aligned, which means
+ * so must the header size.  Do not assume that sizeof(struct
+ * debug_alloc_header) is a multiple of the alignment, explicitly
+ * calculate the overhead of this header, including the alignment.
+ * The rest of this code must not use sizeof() on any header or
+ * pointer to a header.
+ */
+#define dah_align 8
+#define dah_overhead ALIGN(sizeof(struct debug_alloc_header), dah_align)
+
+static u64 debug_alloc_pool_aligned[256*1024/dah_align];	/* 256K pool */
+static char *debug_alloc_pool = (char *)debug_alloc_pool_aligned;
+static u32 dah_first, dah_first_call = 1, dah_used, dah_used_max;
+
+/* Locking is awkward.  The debug code is called from all contexts,
+ * including non-maskable interrupts.  A normal spinlock is not safe
+ * in NMI context.  Try to get the debug allocator lock; if it cannot
+ * be obtained within a second then give up.  If the lock could not be
+ * obtained previously on this cpu then only try once.
+ *
+ * sparse has no annotation for "this function _sometimes_ acquires a
+ * lock", so fudge the acquire/release notation.
+ */
+static DEFINE_SPINLOCK(dap_lock);
+static int get_dap_lock(void)
+	__acquires(dap_lock)
+{
+	static int dap_locked = -1;
+	int count;
+	if (dap_locked == smp_processor_id())
+		count = 1;
+	else
+		count = 1000;
+	while (1) {
+		if (spin_trylock(&dap_lock)) {
+			dap_locked = -1;
+			return 1;
+		}
+		if (!count--)
+			break;
+		udelay(1000);
+	}
+	dap_locked = smp_processor_id();
+	__acquire(dap_lock);
+	return 0;
+}
+
+void *debug_kmalloc(size_t size, gfp_t flags)
+{
+	unsigned int rem, h_offset;
+	struct debug_alloc_header *best, *bestprev, *prev, *h;
+	void *p = NULL;
+	if (!get_dap_lock()) {
+		__release(dap_lock);	/* we never actually got it */
+		return NULL;
+	}
+	h = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
+	if (dah_first_call) {
+		h->size = sizeof(debug_alloc_pool_aligned) - dah_overhead;
+		dah_first_call = 0;
+	}
+	size = ALIGN(size, dah_align);
+	prev = best = bestprev = NULL;
+	while (1) {
+		if (h->size >= size && (!best || h->size < best->size)) {
+			best = h;
+			bestprev = prev;
+			if (h->size == size)
+				break;
+		}
+		if (!h->next)
+			break;
+		prev = h;
+		h = (struct debug_alloc_header *)(debug_alloc_pool + h->next);
+	}
+	if (!best)
+		goto out;
+	rem = best->size - size;
+	/* The pool must always contain at least one header */
+	if (best->next == 0 && bestprev == NULL && rem < dah_overhead)
+		goto out;
+	if (rem >= dah_overhead) {
+		best->size = size;
+		h_offset = ((char *)best - debug_alloc_pool) +
+			   dah_overhead + best->size;
+		h = (struct debug_alloc_header *)(debug_alloc_pool + h_offset);
+		h->size = rem - dah_overhead;
+		h->next = best->next;
+	} else
+		h_offset = best->next;
+	best->caller = __builtin_return_address(0);
+	dah_used += best->size;
+	dah_used_max = max(dah_used, dah_used_max);
+	if (bestprev)
+		bestprev->next = h_offset;
+	else
+		dah_first = h_offset;
+	p = (char *)best + dah_overhead;
+	memset(p, POISON_INUSE, best->size - 1);
+	*((char *)p + best->size - 1) = POISON_END;
+out:
+	spin_unlock(&dap_lock);
+	return p;
+}
+
+void debug_kfree(void *p)
+{
+	struct debug_alloc_header *h;
+	unsigned int h_offset;
+	if (!p)
+		return;
+	if ((char *)p < debug_alloc_pool ||
+	    (char *)p >= debug_alloc_pool + sizeof(debug_alloc_pool_aligned)) {
+		kfree(p);
+		return;
+	}
+	if (!get_dap_lock()) {
+		__release(dap_lock);	/* we never actually got it */
+		return;		/* memory leak, cannot be helped */
+	}
+	h = (struct debug_alloc_header *)((char *)p - dah_overhead);
+	memset(p, POISON_FREE, h->size - 1);
+	*((char *)p + h->size - 1) = POISON_END;
+	h->caller = NULL;
+	dah_used -= h->size;
+	h_offset = (char *)h - debug_alloc_pool;
+	if (h_offset < dah_first) {
+		h->next = dah_first;
+		dah_first = h_offset;
+	} else {
+		struct debug_alloc_header *prev;
+		unsigned int prev_offset;
+		prev = (struct debug_alloc_header *)(debug_alloc_pool +
+						     dah_first);
+		while (1) {
+			if (!prev->next || prev->next > h_offset)
+				break;
+			prev = (struct debug_alloc_header *)
+				(debug_alloc_pool + prev->next);
+		}
+		prev_offset = (char *)prev - debug_alloc_pool;
+		if (prev_offset + dah_overhead + prev->size == h_offset) {
+			prev->size += dah_overhead + h->size;
+			memset(h, POISON_FREE, dah_overhead - 1);
+			*((char *)h + dah_overhead - 1) = POISON_END;
+			h = prev;
+			h_offset = prev_offset;
+		} else {
+			h->next = prev->next;
+			prev->next = h_offset;
+		}
+	}
+	if (h_offset + dah_overhead + h->size == h->next) {
+		struct debug_alloc_header *next;
+		next = (struct debug_alloc_header *)
+			(debug_alloc_pool + h->next);
+		h->size += dah_overhead + next->size;
+		h->next = next->next;
+		memset(next, POISON_FREE, dah_overhead - 1);
+		*((char *)next + dah_overhead - 1) = POISON_END;
+	}
+	spin_unlock(&dap_lock);
+}
+
+void debug_kusage(void)
+{
+	struct debug_alloc_header *h_free, *h_used;
+#ifdef	CONFIG_IA64
+	/* FIXME: using dah for ia64 unwind always results in a memory leak.
+	 * Fix that memory leak first, then set debug_kusage_one_time = 1 for
+	 * all architectures.
+	 */
+	static int debug_kusage_one_time;
+#else
+	static int debug_kusage_one_time = 1;
+#endif
+	if (!get_dap_lock()) {
+		__release(dap_lock);	/* we never actually got it */
+		return;
+	}
+	h_free = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
+	if (dah_first == 0 &&
+	    (h_free->size == sizeof(debug_alloc_pool_aligned) - dah_overhead ||
+	     dah_first_call))
+		goto out;
+	if (!debug_kusage_one_time)
+		goto out;
+	debug_kusage_one_time = 0;
+	kdb_printf("%s: debug_kmalloc memory leak dah_first %d\n",
+		   __func__, dah_first);
+	if (dah_first) {
+		h_used = (struct debug_alloc_header *)debug_alloc_pool;
+		kdb_printf("%s: h_used %p size %d\n", __func__, h_used,
+			   h_used->size);
+	}
+	do {
+		h_used = (struct debug_alloc_header *)
+			  ((char *)h_free + dah_overhead + h_free->size);
+		kdb_printf("%s: h_used %p size %d caller %p\n",
+			   __func__, h_used, h_used->size, h_used->caller);
+		h_free = (struct debug_alloc_header *)
+			  (debug_alloc_pool + h_free->next);
+	} while (h_free->next);
+	h_used = (struct debug_alloc_header *)
+		  ((char *)h_free + dah_overhead + h_free->size);
+	if ((char *)h_used - debug_alloc_pool !=
+	    sizeof(debug_alloc_pool_aligned))
+		kdb_printf("%s: h_used %p size %d caller %p\n",
+			   __func__, h_used, h_used->size, h_used->caller);
+out:
+	spin_unlock(&dap_lock);
+}
+
+/* Maintain a small stack of kdb_flags to allow recursion without disturbing
+ * the global kdb state.
+ */
+
+static int kdb_flags_stack[4], kdb_flags_index;
+
+void kdb_save_flags(void)
+{
+	BUG_ON(kdb_flags_index >= ARRAY_SIZE(kdb_flags_stack));
+	kdb_flags_stack[kdb_flags_index++] = kdb_flags;
+}
+
+void kdb_restore_flags(void)
+{
+	BUG_ON(kdb_flags_index <= 0);
+	kdb_flags = kdb_flags_stack[--kdb_flags_index];
+}
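
A small sketch (hypothetical helper, not in the patch) of how the state-mask
helpers above are meant to be combined when walking the task list for a
ps-style display:

	/* Illustrative only: print tasks whose state letter matches the
	 * requested flags, e.g. "DRSTCZEU" (NULL falls back to $PS). */
	static void kdb_show_matching_tasks(const char *flags)
	{
		struct task_struct *g, *p;
		unsigned long mask = kdb_task_state_string(flags);

		kdb_do_each_thread(g, p) {
			/* kdb_task_state() folds the task's one-letter
			 * state back into a mask and tests it against
			 * the request. */
			if (kdb_task_state(p, mask))
				kdb_printf("%5d %c %s\n", p->pid,
					   kdb_task_state_char(p), p->comm);
		} kdb_while_each_thread(g, p);
	}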
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 13aff29..6f6d091 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/seq_file.h>
 #include <linux/fs.h>
+#include <linux/kdb.h>
 #include <linux/err.h>
 #include <linux/proc_fs.h>
 #include <linux/sched.h>	/* for cond_resched */
@@ -516,6 +517,26 @@
 	return ret;
 }
 
+#ifdef	CONFIG_KGDB_KDB
+const char *kdb_walk_kallsyms(loff_t *pos)
+{
+	static struct kallsym_iter kdb_walk_kallsyms_iter;
+	if (*pos == 0) {
+		memset(&kdb_walk_kallsyms_iter, 0,
+		       sizeof(kdb_walk_kallsyms_iter));
+		reset_iter(&kdb_walk_kallsyms_iter, 0);
+	}
+	while (1) {
+		if (!update_iter(&kdb_walk_kallsyms_iter, *pos))
+			return NULL;
+		++*pos;
+		/* Some debugging symbols have no name.  Ignore them. */
+		if (kdb_walk_kallsyms_iter.name[0])
+			return kdb_walk_kallsyms_iter.name;
+	}
+}
+#endif	/* CONFIG_KGDB_KDB */
+
 static const struct file_operations kallsyms_operations = {
 	.open = kallsyms_open,
 	.read = seq_read,
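
A brief sketch (hypothetical helper, not in the patch) of how the iterator
added above can be used; it mirrors the prefix matching done by
kallsyms_symbol_complete() in kdb_support.c:

	/* Illustrative only: count symbols whose names begin with prefix */
	static int kdb_count_prefix(const char *prefix)
	{
		loff_t pos = 0;
		int len = strlen(prefix), count = 0;
		const char *name;

		/* kdb_walk_kallsyms() restarts when *pos == 0 and then
		 * returns one symbol name per call until it runs out. */
		while ((name = kdb_walk_kallsyms(&pos)))
			if (strncmp(name, prefix, len) == 0)
				count++;
		return count;
	}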
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
deleted file mode 100644
index 11f3515..0000000
--- a/kernel/kgdb.c
+++ /dev/null
@@ -1,1764 +0,0 @@
-/*
- * KGDB stub.
- *
- * Maintainer: Jason Wessel <jason.wessel@windriver.com>
- *
- * Copyright (C) 2000-2001 VERITAS Software Corporation.
- * Copyright (C) 2002-2004 Timesys Corporation
- * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
- * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
- * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
- * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
- * Copyright (C) 2005-2008 Wind River Systems, Inc.
- * Copyright (C) 2007 MontaVista Software, Inc.
- * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- *
- * Contributors at various stages not listed above:
- *  Jason Wessel ( jason.wessel@windriver.com )
- *  George Anzinger <george@mvista.com>
- *  Anurekh Saxena (anurekh.saxena@timesys.com)
- *  Lake Stevens Instrument Division (Glenn Engel)
- *  Jim Kingdon, Cygnus Support.
- *
- * Original KGDB stub: David Grothe <dave@gcom.com>,
- * Tigran Aivazian <tigran@sco.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-#include <linux/pid_namespace.h>
-#include <linux/clocksource.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/console.h>
-#include <linux/threads.h>
-#include <linux/uaccess.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ptrace.h>
-#include <linux/reboot.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/sysrq.h>
-#include <linux/init.h>
-#include <linux/kgdb.h>
-#include <linux/pid.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-
-#include <asm/cacheflush.h>
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <asm/unaligned.h>
-
-static int kgdb_break_asap;
-
-#define KGDB_MAX_THREAD_QUERY 17
-struct kgdb_state {
-	int			ex_vector;
-	int			signo;
-	int			err_code;
-	int			cpu;
-	int			pass_exception;
-	unsigned long		thr_query;
-	unsigned long		threadid;
-	long			kgdb_usethreadid;
-	struct pt_regs		*linux_regs;
-};
-
-/* Exception state values */
-#define DCPU_WANT_MASTER 0x1 /* Waiting to become a master kgdb cpu */
-#define DCPU_NEXT_MASTER 0x2 /* Transition from one master cpu to another */
-#define DCPU_IS_SLAVE    0x4 /* Slave cpu enter exception */
-#define DCPU_SSTEP       0x8 /* CPU is single stepping */
-
-static struct debuggerinfo_struct {
-	void			*debuggerinfo;
-	struct task_struct	*task;
-	int 			exception_state;
-} kgdb_info[NR_CPUS];
-
-/**
- * kgdb_connected - Is a host GDB connected to us?
- */
-int				kgdb_connected;
-EXPORT_SYMBOL_GPL(kgdb_connected);
-
-/* All the KGDB handlers are installed */
-static int			kgdb_io_module_registered;
-
-/* Guard for recursive entry */
-static int			exception_level;
-
-static struct kgdb_io		*kgdb_io_ops;
-static DEFINE_SPINLOCK(kgdb_registration_lock);
-
-/* kgdb console driver is loaded */
-static int kgdb_con_registered;
-/* determine if kgdb console output should be used */
-static int kgdb_use_con;
-
-static int __init opt_kgdb_con(char *str)
-{
-	kgdb_use_con = 1;
-	return 0;
-}
-
-early_param("kgdbcon", opt_kgdb_con);
-
-module_param(kgdb_use_con, int, 0644);
-
-/*
- * Holds information about breakpoints in a kernel. These breakpoints are
- * added and removed by gdb.
- */
-static struct kgdb_bkpt		kgdb_break[KGDB_MAX_BREAKPOINTS] = {
-	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
-};
-
-/*
- * The CPU# of the active CPU, or -1 if none:
- */
-atomic_t			kgdb_active = ATOMIC_INIT(-1);
-
-/*
- * We use NR_CPUs not PERCPU, in case kgdb is used to debug early
- * bootup code (which might not have percpu set up yet):
- */
-static atomic_t			passive_cpu_wait[NR_CPUS];
-static atomic_t			cpu_in_kgdb[NR_CPUS];
-atomic_t			kgdb_setting_breakpoint;
-
-struct task_struct		*kgdb_usethread;
-struct task_struct		*kgdb_contthread;
-
-int				kgdb_single_step;
-pid_t				kgdb_sstep_pid;
-
-/* Our I/O buffers. */
-static char			remcom_in_buffer[BUFMAX];
-static char			remcom_out_buffer[BUFMAX];
-
-/* Storage for the registers, in GDB format. */
-static unsigned long		gdb_regs[(NUMREGBYTES +
-					sizeof(unsigned long) - 1) /
-					sizeof(unsigned long)];
-
-/* to keep track of the CPU which is doing the single stepping*/
-atomic_t			kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
-
-/*
- * If you are debugging a problem where roundup (the collection of
- * all other CPUs) is a problem [this should be extremely rare],
- * then use the nokgdbroundup option to avoid roundup. In that case
- * the other CPUs might interfere with your debugging context, so
- * use this with care:
- */
-static int kgdb_do_roundup = 1;
-
-static int __init opt_nokgdbroundup(char *str)
-{
-	kgdb_do_roundup = 0;
-
-	return 0;
-}
-
-early_param("nokgdbroundup", opt_nokgdbroundup);
-
-/*
- * Finally, some KGDB code :-)
- */
-
-/*
- * Weak aliases for breakpoint management,
- * can be overriden by architectures when needed:
- */
-int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
-{
-	int err;
-
-	err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
-	if (err)
-		return err;
-
-	return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
-				  BREAK_INSTR_SIZE);
-}
-
-int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
-{
-	return probe_kernel_write((char *)addr,
-				  (char *)bundle, BREAK_INSTR_SIZE);
-}
-
-int __weak kgdb_validate_break_address(unsigned long addr)
-{
-	char tmp_variable[BREAK_INSTR_SIZE];
-	int err;
-	/* Validate setting the breakpoint and then removing it.  In the
-	 * remove fails, the kernel needs to emit a bad message because we
-	 * are deep trouble not being able to put things back the way we
-	 * found them.
-	 */
-	err = kgdb_arch_set_breakpoint(addr, tmp_variable);
-	if (err)
-		return err;
-	err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
-	if (err)
-		printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
-		   "memory destroyed at: %lx", addr);
-	return err;
-}
-
-unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
-{
-	return instruction_pointer(regs);
-}
-
-int __weak kgdb_arch_init(void)
-{
-	return 0;
-}
-
-int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
-{
-	return 0;
-}
-
-void __weak
-kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
-{
-	return;
-}
-
-/**
- *	kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb.
- *	@regs: Current &struct pt_regs.
- *
- *	This function will be called if the particular architecture must
- *	disable hardware debugging while it is processing gdb packets or
- *	handling exception.
- */
-void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
-{
-}
-
-/*
- * GDB remote protocol parser:
- */
-
-static int hex(char ch)
-{
-	if ((ch >= 'a') && (ch <= 'f'))
-		return ch - 'a' + 10;
-	if ((ch >= '0') && (ch <= '9'))
-		return ch - '0';
-	if ((ch >= 'A') && (ch <= 'F'))
-		return ch - 'A' + 10;
-	return -1;
-}
-
-/* scan for the sequence $<data>#<checksum> */
-static void get_packet(char *buffer)
-{
-	unsigned char checksum;
-	unsigned char xmitcsum;
-	int count;
-	char ch;
-
-	do {
-		/*
-		 * Spin and wait around for the start character, ignore all
-		 * other characters:
-		 */
-		while ((ch = (kgdb_io_ops->read_char())) != '$')
-			/* nothing */;
-
-		kgdb_connected = 1;
-		checksum = 0;
-		xmitcsum = -1;
-
-		count = 0;
-
-		/*
-		 * now, read until a # or end of buffer is found:
-		 */
-		while (count < (BUFMAX - 1)) {
-			ch = kgdb_io_ops->read_char();
-			if (ch == '#')
-				break;
-			checksum = checksum + ch;
-			buffer[count] = ch;
-			count = count + 1;
-		}
-		buffer[count] = 0;
-
-		if (ch == '#') {
-			xmitcsum = hex(kgdb_io_ops->read_char()) << 4;
-			xmitcsum += hex(kgdb_io_ops->read_char());
-
-			if (checksum != xmitcsum)
-				/* failed checksum */
-				kgdb_io_ops->write_char('-');
-			else
-				/* successful transfer */
-				kgdb_io_ops->write_char('+');
-			if (kgdb_io_ops->flush)
-				kgdb_io_ops->flush();
-		}
-	} while (checksum != xmitcsum);
-}
-
-/*
- * Send the packet in buffer.
- * Check for gdb connection if asked for.
- */
-static void put_packet(char *buffer)
-{
-	unsigned char checksum;
-	int count;
-	char ch;
-
-	/*
-	 * $<packet info>#<checksum>.
-	 */
-	while (1) {
-		kgdb_io_ops->write_char('$');
-		checksum = 0;
-		count = 0;
-
-		while ((ch = buffer[count])) {
-			kgdb_io_ops->write_char(ch);
-			checksum += ch;
-			count++;
-		}
-
-		kgdb_io_ops->write_char('#');
-		kgdb_io_ops->write_char(hex_asc_hi(checksum));
-		kgdb_io_ops->write_char(hex_asc_lo(checksum));
-		if (kgdb_io_ops->flush)
-			kgdb_io_ops->flush();
-
-		/* Now see what we get in reply. */
-		ch = kgdb_io_ops->read_char();
-
-		if (ch == 3)
-			ch = kgdb_io_ops->read_char();
-
-		/* If we get an ACK, we are done. */
-		if (ch == '+')
-			return;
-
-		/*
-		 * If we get the start of another packet, this means
-		 * that GDB is attempting to reconnect.  We will NAK
-		 * the packet being sent, and stop trying to send this
-		 * packet.
-		 */
-		if (ch == '$') {
-			kgdb_io_ops->write_char('-');
-			if (kgdb_io_ops->flush)
-				kgdb_io_ops->flush();
-			return;
-		}
-	}
-}
-
-/*
- * Convert the memory pointed to by mem into hex, placing result in buf.
- * Return a pointer to the last char put in buf (null). May return an error.
- */
-int kgdb_mem2hex(char *mem, char *buf, int count)
-{
-	char *tmp;
-	int err;
-
-	/*
-	 * We use the upper half of buf as an intermediate buffer for the
-	 * raw memory copy.  Hex conversion will work against this one.
-	 */
-	tmp = buf + count;
-
-	err = probe_kernel_read(tmp, mem, count);
-	if (!err) {
-		while (count > 0) {
-			buf = pack_hex_byte(buf, *tmp);
-			tmp++;
-			count--;
-		}
-
-		*buf = 0;
-	}
-
-	return err;
-}
-
-/*
- * Copy the binary array pointed to by buf into mem.  Fix $, #, and
- * 0x7d escaped with 0x7d. Return -EFAULT on failure or 0 on success.
- * The input buf is overwitten with the result to write to mem.
- */
-static int kgdb_ebin2mem(char *buf, char *mem, int count)
-{
-	int size = 0;
-	char *c = buf;
-
-	while (count-- > 0) {
-		c[size] = *buf++;
-		if (c[size] == 0x7d)
-			c[size] = *buf++ ^ 0x20;
-		size++;
-	}
-
-	return probe_kernel_write(mem, c, size);
-}
-
-/*
- * Convert the hex array pointed to by buf into binary to be placed in mem.
- * Return a pointer to the character AFTER the last byte written.
- * May return an error.
- */
-int kgdb_hex2mem(char *buf, char *mem, int count)
-{
-	char *tmp_raw;
-	char *tmp_hex;
-
-	/*
-	 * We use the upper half of buf as an intermediate buffer for the
-	 * raw memory that is converted from hex.
-	 */
-	tmp_raw = buf + count * 2;
-
-	tmp_hex = tmp_raw - 1;
-	while (tmp_hex >= buf) {
-		tmp_raw--;
-		*tmp_raw = hex(*tmp_hex--);
-		*tmp_raw |= hex(*tmp_hex--) << 4;
-	}
-
-	return probe_kernel_write(mem, tmp_raw, count);
-}
-
-/*
- * While we find nice hex chars, build a long_val.
- * Return number of chars processed.
- */
-int kgdb_hex2long(char **ptr, unsigned long *long_val)
-{
-	int hex_val;
-	int num = 0;
-	int negate = 0;
-
-	*long_val = 0;
-
-	if (**ptr == '-') {
-		negate = 1;
-		(*ptr)++;
-	}
-	while (**ptr) {
-		hex_val = hex(**ptr);
-		if (hex_val < 0)
-			break;
-
-		*long_val = (*long_val << 4) | hex_val;
-		num++;
-		(*ptr)++;
-	}
-
-	if (negate)
-		*long_val = -*long_val;
-
-	return num;
-}
-
-/* Write memory due to an 'M' or 'X' packet. */
-static int write_mem_msg(int binary)
-{
-	char *ptr = &remcom_in_buffer[1];
-	unsigned long addr;
-	unsigned long length;
-	int err;
-
-	if (kgdb_hex2long(&ptr, &addr) > 0 && *(ptr++) == ',' &&
-	    kgdb_hex2long(&ptr, &length) > 0 && *(ptr++) == ':') {
-		if (binary)
-			err = kgdb_ebin2mem(ptr, (char *)addr, length);
-		else
-			err = kgdb_hex2mem(ptr, (char *)addr, length);
-		if (err)
-			return err;
-		if (CACHE_FLUSH_IS_SAFE)
-			flush_icache_range(addr, addr + length);
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-static void error_packet(char *pkt, int error)
-{
-	error = -error;
-	pkt[0] = 'E';
-	pkt[1] = hex_asc[(error / 10)];
-	pkt[2] = hex_asc[(error % 10)];
-	pkt[3] = '\0';
-}
-
-/*
- * Thread ID accessors. We represent a flat TID space to GDB, where
- * the per CPU idle threads (which under Linux all have PID 0) are
- * remapped to negative TIDs.
- */
-
-#define BUF_THREAD_ID_SIZE	16
-
-static char *pack_threadid(char *pkt, unsigned char *id)
-{
-	char *limit;
-
-	limit = pkt + BUF_THREAD_ID_SIZE;
-	while (pkt < limit)
-		pkt = pack_hex_byte(pkt, *id++);
-
-	return pkt;
-}
-
-static void int_to_threadref(unsigned char *id, int value)
-{
-	unsigned char *scan;
-	int i = 4;
-
-	scan = (unsigned char *)id;
-	while (i--)
-		*scan++ = 0;
-	put_unaligned_be32(value, scan);
-}
-
-static struct task_struct *getthread(struct pt_regs *regs, int tid)
-{
-	/*
-	 * Non-positive TIDs are remapped to the cpu shadow information
-	 */
-	if (tid == 0 || tid == -1)
-		tid = -atomic_read(&kgdb_active) - 2;
-	if (tid < -1 && tid > -NR_CPUS - 2) {
-		if (kgdb_info[-tid - 2].task)
-			return kgdb_info[-tid - 2].task;
-		else
-			return idle_task(-tid - 2);
-	}
-	if (tid <= 0) {
-		printk(KERN_ERR "KGDB: Internal thread select error\n");
-		dump_stack();
-		return NULL;
-	}
-
-	/*
-	 * find_task_by_pid_ns() does not take the tasklist lock anymore
-	 * but is nicely RCU locked - hence is a pretty resilient
-	 * thing to use:
-	 */
-	return find_task_by_pid_ns(tid, &init_pid_ns);
-}
-
-/*
- * Some architectures need cache flushes when we set/clear a
- * breakpoint:
- */
-static void kgdb_flush_swbreak_addr(unsigned long addr)
-{
-	if (!CACHE_FLUSH_IS_SAFE)
-		return;
-
-	if (current->mm && current->mm->mmap_cache) {
-		flush_cache_range(current->mm->mmap_cache,
-				  addr, addr + BREAK_INSTR_SIZE);
-	}
-	/* Force flush instruction cache if it was outside the mm */
-	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
-}
-
-/*
- * SW breakpoint management:
- */
-static int kgdb_activate_sw_breakpoints(void)
-{
-	unsigned long addr;
-	int error;
-	int ret = 0;
-	int i;
-
-	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-		if (kgdb_break[i].state != BP_SET)
-			continue;
-
-		addr = kgdb_break[i].bpt_addr;
-		error = kgdb_arch_set_breakpoint(addr,
-				kgdb_break[i].saved_instr);
-		if (error) {
-			ret = error;
-			printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
-			continue;
-		}
-
-		kgdb_flush_swbreak_addr(addr);
-		kgdb_break[i].state = BP_ACTIVE;
-	}
-	return ret;
-}
-
-static int kgdb_set_sw_break(unsigned long addr)
-{
-	int err = kgdb_validate_break_address(addr);
-	int breakno = -1;
-	int i;
-
-	if (err)
-		return err;
-
-	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-		if ((kgdb_break[i].state == BP_SET) &&
-					(kgdb_break[i].bpt_addr == addr))
-			return -EEXIST;
-	}
-	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-		if (kgdb_break[i].state == BP_REMOVED &&
-					kgdb_break[i].bpt_addr == addr) {
-			breakno = i;
-			break;
-		}
-	}
-
-	if (breakno == -1) {
-		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-			if (kgdb_break[i].state == BP_UNDEFINED) {
-				breakno = i;
-				break;
-			}
-		}
-	}
-
-	if (breakno == -1)
-		return -E2BIG;
-
-	kgdb_break[breakno].state = BP_SET;
-	kgdb_break[breakno].type = BP_BREAKPOINT;
-	kgdb_break[breakno].bpt_addr = addr;
-
-	return 0;
-}
-
-static int kgdb_deactivate_sw_breakpoints(void)
-{
-	unsigned long addr;
-	int error;
-	int ret = 0;
-	int i;
-
-	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-		if (kgdb_break[i].state != BP_ACTIVE)
-			continue;
-		addr = kgdb_break[i].bpt_addr;
-		error = kgdb_arch_remove_breakpoint(addr,
-					kgdb_break[i].saved_instr);
-		if (error) {
-			printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
-			ret = error;
-		}
-
-		kgdb_flush_swbreak_addr(addr);
-		kgdb_break[i].state = BP_SET;
-	}
-	return ret;
-}
-
-static int kgdb_remove_sw_break(unsigned long addr)
-{
-	int i;
-
-	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-		if ((kgdb_break[i].state == BP_SET) &&
-				(kgdb_break[i].bpt_addr == addr)) {
-			kgdb_break[i].state = BP_REMOVED;
-			return 0;
-		}
-	}
-	return -ENOENT;
-}
-
-int kgdb_isremovedbreak(unsigned long addr)
-{
-	int i;
-
-	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-		if ((kgdb_break[i].state == BP_REMOVED) &&
-					(kgdb_break[i].bpt_addr == addr))
-			return 1;
-	}
-	return 0;
-}
-
-static int remove_all_break(void)
-{
-	unsigned long addr;
-	int error;
-	int i;
-
-	/* Clear memory breakpoints. */
-	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-		if (kgdb_break[i].state != BP_ACTIVE)
-			goto setundefined;
-		addr = kgdb_break[i].bpt_addr;
-		error = kgdb_arch_remove_breakpoint(addr,
-				kgdb_break[i].saved_instr);
-		if (error)
-			printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
-			   addr);
-setundefined:
-		kgdb_break[i].state = BP_UNDEFINED;
-	}
-
-	/* Clear hardware breakpoints. */
-	if (arch_kgdb_ops.remove_all_hw_break)
-		arch_kgdb_ops.remove_all_hw_break();
-
-	return 0;
-}
-
-/*
- * Remap normal tasks to their real PID,
- * CPU shadow threads are mapped to -CPU - 2
- */
-static inline int shadow_pid(int realpid)
-{
-	if (realpid)
-		return realpid;
-
-	return -raw_smp_processor_id() - 2;
-}
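The TID remapping used by the stub is symmetric: shadow_pid() above reports a per-CPU idle thread (PID 0) to GDB as -cpu - 2, and getthread() inverts that mapping when GDB later selects such a TID. A minimal userspace sketch of the two directions (helper names here are illustrative, not kernel API):

#include <stdio.h>

/* Kernel PID -> TID reported to GDB: idle threads (PID 0) become -cpu - 2. */
static int pid_to_gdb_tid(int pid, int cpu)
{
	return pid ? pid : -cpu - 2;
}

/* TID from GDB -> CPU index; only valid for the shadow (negative) range. */
static int gdb_tid_to_cpu(int tid)
{
	return -tid - 2;
}

int main(void)
{
	printf("idle thread on cpu 3 -> TID %d\n", pid_to_gdb_tid(0, 3));
	printf("TID -5 -> cpu %d\n", gdb_tid_to_cpu(-5));
	return 0;
}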
-
-static char gdbmsgbuf[BUFMAX + 1];
-
-static void kgdb_msg_write(const char *s, int len)
-{
-	char *bufptr;
-	int wcount;
-	int i;
-
-	/* 'O'utput */
-	gdbmsgbuf[0] = 'O';
-
-	/* Fill and send buffers... */
-	while (len > 0) {
-		bufptr = gdbmsgbuf + 1;
-
-		/* Calculate how many this time */
-		if ((len << 1) > (BUFMAX - 2))
-			wcount = (BUFMAX - 2) >> 1;
-		else
-			wcount = len;
-
-		/* Pack in hex chars */
-		for (i = 0; i < wcount; i++)
-			bufptr = pack_hex_byte(bufptr, s[i]);
-		*bufptr = '\0';
-
-		/* Move up */
-		s += wcount;
-		len -= wcount;
-
-		/* Write packet */
-		put_packet(gdbmsgbuf);
-	}
-}
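kgdb_msg_write() wraps console text in gdbserial 'O' (output) packets: the payload is the message hex-encoded, two lowercase hex digits per byte. A self-contained sketch of that encoding (userspace C, not the kernel helper itself):

#include <stdio.h>

/* Hex-encode a console message into an 'O' packet payload:
 * "ok\n" becomes "O6f6b0a".  The kernel code above additionally splits
 * long messages across several packets so each fits in BUFMAX. */
static void encode_output_packet(const char *msg, char *pkt)
{
	static const char hex[] = "0123456789abcdef";

	*pkt++ = 'O';
	for (; *msg; msg++) {
		*pkt++ = hex[((unsigned char)*msg >> 4) & 0xf];
		*pkt++ = hex[(unsigned char)*msg & 0xf];
	}
	*pkt = '\0';
}

int main(void)
{
	char pkt[64];

	encode_output_packet("ok\n", pkt);
	printf("%s\n", pkt);	/* prints O6f6b0a */
	return 0;
}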
-
-/*
- * Return true if there is a valid kgdb I/O module.  Also if no
- * debugger is attached a message can be printed to the console about
- * waiting for the debugger to attach.
- *
- * The print_wait argument is only to be true when called from inside
- * the core kgdb_handle_exception, because it will wait for the
- * debugger to attach.
- */
-static int kgdb_io_ready(int print_wait)
-{
-	if (!kgdb_io_ops)
-		return 0;
-	if (kgdb_connected)
-		return 1;
-	if (atomic_read(&kgdb_setting_breakpoint))
-		return 1;
-	if (print_wait)
-		printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
-	return 1;
-}
-
-/*
- * All the functions that start with gdb_cmd are the various
- * operations to implement the handlers for the gdbserial protocol
- * where KGDB is communicating with an external debugger
- */
-
-/* Handle the '?' status packets */
-static void gdb_cmd_status(struct kgdb_state *ks)
-{
-	/*
-	 * We know that this packet is only sent
-	 * during initial connect.  So to be safe,
-	 * we clear out our breakpoints now in case
-	 * GDB is reconnecting.
-	 */
-	remove_all_break();
-
-	remcom_out_buffer[0] = 'S';
-	pack_hex_byte(&remcom_out_buffer[1], ks->signo);
-}
-
-/* Handle the 'g' get registers request */
-static void gdb_cmd_getregs(struct kgdb_state *ks)
-{
-	struct task_struct *thread;
-	void *local_debuggerinfo;
-	int i;
-
-	thread = kgdb_usethread;
-	if (!thread) {
-		thread = kgdb_info[ks->cpu].task;
-		local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo;
-	} else {
-		local_debuggerinfo = NULL;
-		for_each_online_cpu(i) {
-			/*
-			 * Try to find the task on some other
-			 * or possibly this node; if we do not
-			 * find the matching task then we try
-			 * to approximate the results.
-			 */
-			if (thread == kgdb_info[i].task)
-				local_debuggerinfo = kgdb_info[i].debuggerinfo;
-		}
-	}
-
-	/*
-	 * All threads that don't have debuggerinfo should be
-	 * in schedule() sleeping, since all other CPUs
-	 * are in kgdb_wait, and thus have debuggerinfo.
-	 */
-	if (local_debuggerinfo) {
-		pt_regs_to_gdb_regs(gdb_regs, local_debuggerinfo);
-	} else {
-		/*
-		 * Pull stuff saved during switch_to; nothing
-		 * else is accessible (or even particularly
-		 * relevant).
-		 *
-		 * This should be enough for a stack trace.
-		 */
-		sleeping_thread_to_gdb_regs(gdb_regs, thread);
-	}
-	kgdb_mem2hex((char *)gdb_regs, remcom_out_buffer, NUMREGBYTES);
-}
-
-/* Handle the 'G' set registers request */
-static void gdb_cmd_setregs(struct kgdb_state *ks)
-{
-	kgdb_hex2mem(&remcom_in_buffer[1], (char *)gdb_regs, NUMREGBYTES);
-
-	if (kgdb_usethread && kgdb_usethread != current) {
-		error_packet(remcom_out_buffer, -EINVAL);
-	} else {
-		gdb_regs_to_pt_regs(gdb_regs, ks->linux_regs);
-		strcpy(remcom_out_buffer, "OK");
-	}
-}
-
-/* Handle the 'm' memory read bytes */
-static void gdb_cmd_memread(struct kgdb_state *ks)
-{
-	char *ptr = &remcom_in_buffer[1];
-	unsigned long length;
-	unsigned long addr;
-	int err;
-
-	if (kgdb_hex2long(&ptr, &addr) > 0 && *ptr++ == ',' &&
-					kgdb_hex2long(&ptr, &length) > 0) {
-		err = kgdb_mem2hex((char *)addr, remcom_out_buffer, length);
-		if (err)
-			error_packet(remcom_out_buffer, err);
-	} else {
-		error_packet(remcom_out_buffer, -EINVAL);
-	}
-}
-
-/* Handle the 'M' memory write bytes */
-static void gdb_cmd_memwrite(struct kgdb_state *ks)
-{
-	int err = write_mem_msg(0);
-
-	if (err)
-		error_packet(remcom_out_buffer, err);
-	else
-		strcpy(remcom_out_buffer, "OK");
-}
-
-/* Handle the 'X' memory binary write bytes */
-static void gdb_cmd_binwrite(struct kgdb_state *ks)
-{
-	int err = write_mem_msg(1);
-
-	if (err)
-		error_packet(remcom_out_buffer, err);
-	else
-		strcpy(remcom_out_buffer, "OK");
-}
-
-/* Handle the 'D' or 'k', detach or kill packets */
-static void gdb_cmd_detachkill(struct kgdb_state *ks)
-{
-	int error;
-
-	/* The detach case */
-	if (remcom_in_buffer[0] == 'D') {
-		error = remove_all_break();
-		if (error < 0) {
-			error_packet(remcom_out_buffer, error);
-		} else {
-			strcpy(remcom_out_buffer, "OK");
-			kgdb_connected = 0;
-		}
-		put_packet(remcom_out_buffer);
-	} else {
-		/*
-		 * Assume the kill case, with no exit code checking,
-		 * trying to force detach the debugger:
-		 */
-		remove_all_break();
-		kgdb_connected = 0;
-	}
-}
-
-/* Handle the 'R' reboot packets */
-static int gdb_cmd_reboot(struct kgdb_state *ks)
-{
-	/* For now, only honor R0 */
-	if (strcmp(remcom_in_buffer, "R0") == 0) {
-		printk(KERN_CRIT "Executing emergency reboot\n");
-		strcpy(remcom_out_buffer, "OK");
-		put_packet(remcom_out_buffer);
-
-		/*
-		 * Execution should not return from
-		 * machine_emergency_restart()
-		 */
-		machine_emergency_restart();
-		kgdb_connected = 0;
-
-		return 1;
-	}
-	return 0;
-}
-
-/* Handle the 'q' query packets */
-static void gdb_cmd_query(struct kgdb_state *ks)
-{
-	struct task_struct *g;
-	struct task_struct *p;
-	unsigned char thref[8];
-	char *ptr;
-	int i;
-	int cpu;
-	int finished = 0;
-
-	switch (remcom_in_buffer[1]) {
-	case 's':
-	case 'f':
-		if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10)) {
-			error_packet(remcom_out_buffer, -EINVAL);
-			break;
-		}
-
-		i = 0;
-		remcom_out_buffer[0] = 'm';
-		ptr = remcom_out_buffer + 1;
-		if (remcom_in_buffer[1] == 'f') {
-			/* Each cpu is a shadow thread */
-			for_each_online_cpu(cpu) {
-				ks->thr_query = 0;
-				int_to_threadref(thref, -cpu - 2);
-				pack_threadid(ptr, thref);
-				ptr += BUF_THREAD_ID_SIZE;
-				*(ptr++) = ',';
-				i++;
-			}
-		}
-
-		do_each_thread(g, p) {
-			if (i >= ks->thr_query && !finished) {
-				int_to_threadref(thref, p->pid);
-				pack_threadid(ptr, thref);
-				ptr += BUF_THREAD_ID_SIZE;
-				*(ptr++) = ',';
-				ks->thr_query++;
-				if (ks->thr_query % KGDB_MAX_THREAD_QUERY == 0)
-					finished = 1;
-			}
-			i++;
-		} while_each_thread(g, p);
-
-		*(--ptr) = '\0';
-		break;
-
-	case 'C':
-		/* Current thread id */
-		strcpy(remcom_out_buffer, "QC");
-		ks->threadid = shadow_pid(current->pid);
-		int_to_threadref(thref, ks->threadid);
-		pack_threadid(remcom_out_buffer + 2, thref);
-		break;
-	case 'T':
-		if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16)) {
-			error_packet(remcom_out_buffer, -EINVAL);
-			break;
-		}
-		ks->threadid = 0;
-		ptr = remcom_in_buffer + 17;
-		kgdb_hex2long(&ptr, &ks->threadid);
-		if (!getthread(ks->linux_regs, ks->threadid)) {
-			error_packet(remcom_out_buffer, -EINVAL);
-			break;
-		}
-		if ((int)ks->threadid > 0) {
-			kgdb_mem2hex(getthread(ks->linux_regs,
-					ks->threadid)->comm,
-					remcom_out_buffer, 16);
-		} else {
-			static char tmpstr[23 + BUF_THREAD_ID_SIZE];
-
-			sprintf(tmpstr, "shadowCPU%d",
-					(int)(-ks->threadid - 2));
-			kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr));
-		}
-		break;
-	}
-}
-
-/* Handle the 'H' task query packets */
-static void gdb_cmd_task(struct kgdb_state *ks)
-{
-	struct task_struct *thread;
-	char *ptr;
-
-	switch (remcom_in_buffer[1]) {
-	case 'g':
-		ptr = &remcom_in_buffer[2];
-		kgdb_hex2long(&ptr, &ks->threadid);
-		thread = getthread(ks->linux_regs, ks->threadid);
-		if (!thread && ks->threadid > 0) {
-			error_packet(remcom_out_buffer, -EINVAL);
-			break;
-		}
-		kgdb_usethread = thread;
-		ks->kgdb_usethreadid = ks->threadid;
-		strcpy(remcom_out_buffer, "OK");
-		break;
-	case 'c':
-		ptr = &remcom_in_buffer[2];
-		kgdb_hex2long(&ptr, &ks->threadid);
-		if (!ks->threadid) {
-			kgdb_contthread = NULL;
-		} else {
-			thread = getthread(ks->linux_regs, ks->threadid);
-			if (!thread && ks->threadid > 0) {
-				error_packet(remcom_out_buffer, -EINVAL);
-				break;
-			}
-			kgdb_contthread = thread;
-		}
-		strcpy(remcom_out_buffer, "OK");
-		break;
-	}
-}
-
-/* Handle the 'T' thread query packets */
-static void gdb_cmd_thread(struct kgdb_state *ks)
-{
-	char *ptr = &remcom_in_buffer[1];
-	struct task_struct *thread;
-
-	kgdb_hex2long(&ptr, &ks->threadid);
-	thread = getthread(ks->linux_regs, ks->threadid);
-	if (thread)
-		strcpy(remcom_out_buffer, "OK");
-	else
-		error_packet(remcom_out_buffer, -EINVAL);
-}
-
-/* Handle the 'z' or 'Z' breakpoint remove or set packets */
-static void gdb_cmd_break(struct kgdb_state *ks)
-{
-	/*
-	 * Since GDB-5.3, it's been drafted that '0' is a software
-	 * breakpoint, '1' is a hardware breakpoint, so let's do that.
-	 */
-	char *bpt_type = &remcom_in_buffer[1];
-	char *ptr = &remcom_in_buffer[2];
-	unsigned long addr;
-	unsigned long length;
-	int error = 0;
-
-	if (arch_kgdb_ops.set_hw_breakpoint && *bpt_type >= '1') {
-		/* Unsupported */
-		if (*bpt_type > '4')
-			return;
-	} else {
-		if (*bpt_type != '0' && *bpt_type != '1')
-			/* Unsupported. */
-			return;
-	}
-
-	/*
-	 * Test if this is a hardware breakpoint, and
-	 * if we support it:
-	 */
-	if (*bpt_type == '1' && !(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT))
-		/* Unsupported. */
-		return;
-
-	if (*(ptr++) != ',') {
-		error_packet(remcom_out_buffer, -EINVAL);
-		return;
-	}
-	if (!kgdb_hex2long(&ptr, &addr)) {
-		error_packet(remcom_out_buffer, -EINVAL);
-		return;
-	}
-	if (*(ptr++) != ',' ||
-		!kgdb_hex2long(&ptr, &length)) {
-		error_packet(remcom_out_buffer, -EINVAL);
-		return;
-	}
-
-	if (remcom_in_buffer[0] == 'Z' && *bpt_type == '0')
-		error = kgdb_set_sw_break(addr);
-	else if (remcom_in_buffer[0] == 'z' && *bpt_type == '0')
-		error = kgdb_remove_sw_break(addr);
-	else if (remcom_in_buffer[0] == 'Z')
-		error = arch_kgdb_ops.set_hw_breakpoint(addr,
-			(int)length, *bpt_type - '0');
-	else if (remcom_in_buffer[0] == 'z')
-		error = arch_kgdb_ops.remove_hw_breakpoint(addr,
-			(int) length, *bpt_type - '0');
-
-	if (error == 0)
-		strcpy(remcom_out_buffer, "OK");
-	else
-		error_packet(remcom_out_buffer, error);
-}
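The packets handled above have the form 'Z<type>,<addr>,<kind>' to set and 'z<type>,<addr>,<kind>' to remove a breakpoint, with addr and kind in hex. A hedged standalone sketch of that parse (the kernel uses kgdb_hex2long() rather than strtoul()):

#include <stdio.h>
#include <stdlib.h>

/* Parse "Z0,<addr>,<kind>" / "z0,<addr>,<kind>"; returns 0 on success. */
static int parse_break_packet(const char *pkt, int *set, int *type,
			      unsigned long *addr, unsigned long *kind)
{
	char *end;

	if (pkt[0] != 'Z' && pkt[0] != 'z')
		return -1;
	*set = (pkt[0] == 'Z');
	*type = pkt[1] - '0';
	if (pkt[2] != ',')
		return -1;
	*addr = strtoul(pkt + 3, &end, 16);
	if (*end != ',')
		return -1;
	*kind = strtoul(end + 1, &end, 16);
	return 0;
}

int main(void)
{
	int set, type;
	unsigned long addr, kind;

	if (!parse_break_packet("Z0,c01761aa,1", &set, &type, &addr, &kind))
		printf("%s type %d at %#lx kind %lu\n",
		       set ? "set" : "remove", type, addr, kind);
	return 0;
}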
-
-/* Handle the 'C' signal / exception passing packets */
-static int gdb_cmd_exception_pass(struct kgdb_state *ks)
-{
-	/* C09 == pass exception
-	 * C15 == detach kgdb, pass exception
-	 */
-	if (remcom_in_buffer[1] == '0' && remcom_in_buffer[2] == '9') {
-
-		ks->pass_exception = 1;
-		remcom_in_buffer[0] = 'c';
-
-	} else if (remcom_in_buffer[1] == '1' && remcom_in_buffer[2] == '5') {
-
-		ks->pass_exception = 1;
-		remcom_in_buffer[0] = 'D';
-		remove_all_break();
-		kgdb_connected = 0;
-		return 1;
-
-	} else {
-		kgdb_msg_write("KGDB only knows signal 9 (pass)"
-			" and 15 (pass and disconnect)\n"
-			"Executing a continue without signal passing\n", 0);
-		remcom_in_buffer[0] = 'c';
-	}
-
-	/* Indicate fall through */
-	return -1;
-}
-
-/*
- * This function performs all gdbserial command processing
- */
-static int gdb_serial_stub(struct kgdb_state *ks)
-{
-	int error = 0;
-	int tmp;
-
-	/* Clear the out buffer. */
-	memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));
-
-	if (kgdb_connected) {
-		unsigned char thref[8];
-		char *ptr;
-
-		/* Reply to host that an exception has occurred */
-		ptr = remcom_out_buffer;
-		*ptr++ = 'T';
-		ptr = pack_hex_byte(ptr, ks->signo);
-		ptr += strlen(strcpy(ptr, "thread:"));
-		int_to_threadref(thref, shadow_pid(current->pid));
-		ptr = pack_threadid(ptr, thref);
-		*ptr++ = ';';
-		put_packet(remcom_out_buffer);
-	}
-
-	kgdb_usethread = kgdb_info[ks->cpu].task;
-	ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid);
-	ks->pass_exception = 0;
-
-	while (1) {
-		error = 0;
-
-		/* Clear the out buffer. */
-		memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));
-
-		get_packet(remcom_in_buffer);
-
-		switch (remcom_in_buffer[0]) {
-		case '?': /* gdbserial status */
-			gdb_cmd_status(ks);
-			break;
-		case 'g': /* return the value of the CPU registers */
-			gdb_cmd_getregs(ks);
-			break;
-		case 'G': /* set the value of the CPU registers - return OK */
-			gdb_cmd_setregs(ks);
-			break;
-		case 'm': /* mAA..AA,LLLL  Read LLLL bytes at address AA..AA */
-			gdb_cmd_memread(ks);
-			break;
-		case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA..AA */
-			gdb_cmd_memwrite(ks);
-			break;
-		case 'X': /* XAA..AA,LLLL: Write LLLL bytes at address AA..AA */
-			gdb_cmd_binwrite(ks);
-			break;
-			/* kill or detach. KGDB should treat this like a
-			 * continue.
-			 */
-		case 'D': /* Debugger detach */
-		case 'k': /* Debugger detach via kill */
-			gdb_cmd_detachkill(ks);
-			goto default_handle;
-		case 'R': /* Reboot */
-			if (gdb_cmd_reboot(ks))
-				goto default_handle;
-			break;
-		case 'q': /* query command */
-			gdb_cmd_query(ks);
-			break;
-		case 'H': /* task related */
-			gdb_cmd_task(ks);
-			break;
-		case 'T': /* Query thread status */
-			gdb_cmd_thread(ks);
-			break;
-		case 'z': /* Break point remove */
-		case 'Z': /* Break point set */
-			gdb_cmd_break(ks);
-			break;
-		case 'C': /* Exception passing */
-			tmp = gdb_cmd_exception_pass(ks);
-			if (tmp > 0)
-				goto default_handle;
-			if (tmp == 0)
-				break;
-			/* Fall through on tmp < 0 */
-		case 'c': /* Continue packet */
-		case 's': /* Single step packet */
-			if (kgdb_contthread && kgdb_contthread != current) {
-				/* Can't switch threads in kgdb */
-				error_packet(remcom_out_buffer, -EINVAL);
-				break;
-			}
-			kgdb_activate_sw_breakpoints();
-			/* Fall through to default processing */
-		default:
-default_handle:
-			error = kgdb_arch_handle_exception(ks->ex_vector,
-						ks->signo,
-						ks->err_code,
-						remcom_in_buffer,
-						remcom_out_buffer,
-						ks->linux_regs);
-			/*
-			 * Leave cmd processing on error, detach,
-			 * kill, continue, or single step.
-			 */
-			if (error >= 0 || remcom_in_buffer[0] == 'D' ||
-			    remcom_in_buffer[0] == 'k') {
-				error = 0;
-				goto kgdb_exit;
-			}
-
-		}
-
-		/* reply to the request */
-		put_packet(remcom_out_buffer);
-	}
-
-kgdb_exit:
-	if (ks->pass_exception)
-		error = 1;
-	return error;
-}
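The buffers above carry only packet payloads; on the wire, get_packet()/put_packet() frame each one as '$<payload>#<checksum>', where the checksum is the byte sum of the payload modulo 256 printed as two hex digits. A minimal framing sketch (the '+'/'-' acknowledge/retransmit handshake is left out):

#include <stdio.h>

/* Frame a gdbserial payload: "OK" becomes "$OK#9a". */
static void frame_packet(const char *payload, char *out)
{
	unsigned char csum = 0;
	const char *p;

	for (p = payload; *p; p++)
		csum += (unsigned char)*p;
	sprintf(out, "$%s#%02x", payload, csum);
}

int main(void)
{
	char out[128];

	frame_packet("OK", out);
	printf("%s\n", out);	/* prints $OK#9a */
	return 0;
}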
-
-static int kgdb_reenter_check(struct kgdb_state *ks)
-{
-	unsigned long addr;
-
-	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
-		return 0;
-
-	/* Panic on recursive debugger calls: */
-	exception_level++;
-	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
-	kgdb_deactivate_sw_breakpoints();
-
-	 * If the breakpoint was removed ok at the place the exception
-	 * If the break point removed ok at the place exception
-	 * occurred, try to recover and print a warning to the end
-	 * user because the user planted a breakpoint in a place that
-	 * KGDB needs in order to function.
-	 */
-	if (kgdb_remove_sw_break(addr) == 0) {
-		exception_level = 0;
-		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
-		kgdb_activate_sw_breakpoints();
-		printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
-			addr);
-		WARN_ON_ONCE(1);
-
-		return 1;
-	}
-	remove_all_break();
-	kgdb_skipexception(ks->ex_vector, ks->linux_regs);
-
-	if (exception_level > 1) {
-		dump_stack();
-		panic("Recursive entry to debugger");
-	}
-
-	printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
-	dump_stack();
-	panic("Recursive entry to debugger");
-
-	return 1;
-}
-
-static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
-{
-	unsigned long flags;
-	int sstep_tries = 100;
-	int error = 0;
-	int i, cpu;
-	int trace_on = 0;
-acquirelock:
-	/*
-	 * Interrupts will be restored by the 'trap return' code, except when
-	 * single stepping.
-	 */
-	local_irq_save(flags);
-
-	cpu = ks->cpu;
-	kgdb_info[cpu].debuggerinfo = regs;
-	kgdb_info[cpu].task = current;
-	/*
-	 * Make sure the above info reaches the primary CPU before
-	 * our cpu_in_kgdb[] flag setting does:
-	 */
-	atomic_inc(&cpu_in_kgdb[cpu]);
-
-	/*
-	 * CPU will loop if it is a slave or requests to become a kgdb
-	 * master cpu and acquire the kgdb_active lock:
-	 */
-	while (1) {
-		if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
-			if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
-				break;
-		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
-			if (!atomic_read(&passive_cpu_wait[cpu]))
-				goto return_normal;
-		} else {
-return_normal:
-			/* Return to normal operation by executing any
-			 * hw breakpoint fixup.
-			 */
-			if (arch_kgdb_ops.correct_hw_break)
-				arch_kgdb_ops.correct_hw_break();
-			if (trace_on)
-				tracing_on();
-			atomic_dec(&cpu_in_kgdb[cpu]);
-			touch_softlockup_watchdog_sync();
-			clocksource_touch_watchdog();
-			local_irq_restore(flags);
-			return 0;
-		}
-		cpu_relax();
-	}
-
-	/*
-	 * For single stepping, try to only enter on the processor
- * that was single stepping.  To guard against a deadlock, the
-	 * kernel will only try for the value of sstep_tries before
-	 * giving up and continuing on.
-	 */
-	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
-	    (kgdb_info[cpu].task &&
-	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
-		atomic_set(&kgdb_active, -1);
-		touch_softlockup_watchdog_sync();
-		clocksource_touch_watchdog();
-		local_irq_restore(flags);
-
-		goto acquirelock;
-	}
-
-	if (!kgdb_io_ready(1)) {
-		error = 1;
-		goto kgdb_restore; /* No I/O connection, so resume the system */
-	}
-
-	/*
-	 * Don't enter if we have hit a removed breakpoint.
-	 */
-	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
-		goto kgdb_restore;
-
-	/* Call the I/O driver's pre_exception routine */
-	if (kgdb_io_ops->pre_exception)
-		kgdb_io_ops->pre_exception();
-
-	kgdb_disable_hw_debug(ks->linux_regs);
-
-	/*
-	 * Get the passive CPU lock which will hold all the non-primary
-	 * CPU in a spin state while the debugger is active
-	 */
-	if (!kgdb_single_step) {
-		for (i = 0; i < NR_CPUS; i++)
-			atomic_inc(&passive_cpu_wait[i]);
-	}
-
-#ifdef CONFIG_SMP
-	/* Signal the other CPUs to enter kgdb_wait() */
-	if ((!kgdb_single_step) && kgdb_do_roundup)
-		kgdb_roundup_cpus(flags);
-#endif
-
-	/*
-	 * Wait for the other CPUs to be notified and be waiting for us:
-	 */
-	for_each_online_cpu(i) {
-		while (!atomic_read(&cpu_in_kgdb[i]))
-			cpu_relax();
-	}
-
-	/*
-	 * At this point the primary processor is completely
-	 * in the debugger and all secondary CPUs are quiescent
-	 */
-	kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code);
-	kgdb_deactivate_sw_breakpoints();
-	kgdb_single_step = 0;
-	kgdb_contthread = current;
-	exception_level = 0;
-	trace_on = tracing_is_on();
-	if (trace_on)
-		tracing_off();
-
-	/* Talk to debugger with gdbserial protocol */
-	error = gdb_serial_stub(ks);
-
-	/* Call the I/O driver's post_exception routine */
-	if (kgdb_io_ops->post_exception)
-		kgdb_io_ops->post_exception();
-
-	atomic_dec(&cpu_in_kgdb[ks->cpu]);
-
-	if (!kgdb_single_step) {
-		for (i = NR_CPUS-1; i >= 0; i--)
-			atomic_dec(&passive_cpu_wait[i]);
-		/*
-		 * Wait till all the CPUs have quit
-		 * from the debugger.
-		 */
-		for_each_online_cpu(i) {
-			while (atomic_read(&cpu_in_kgdb[i]))
-				cpu_relax();
-		}
-	}
-
-kgdb_restore:
-	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
-		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
-		if (kgdb_info[sstep_cpu].task)
-			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
-		else
-			kgdb_sstep_pid = 0;
-	}
-	if (trace_on)
-		tracing_on();
-	/* Free kgdb_active */
-	atomic_set(&kgdb_active, -1);
-	touch_softlockup_watchdog_sync();
-	clocksource_touch_watchdog();
-	local_irq_restore(flags);
-
-	return error;
-}
-
-/*
- * kgdb_handle_exception() - main entry point from a kernel exception
- *
- * Locking hierarchy:
- *	interface locks, if any (begin_session)
- *	kgdb lock (kgdb_active)
- */
-int
-kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
-{
-	struct kgdb_state kgdb_var;
-	struct kgdb_state *ks = &kgdb_var;
-	int ret;
-
-	ks->cpu			= raw_smp_processor_id();
-	ks->ex_vector		= evector;
-	ks->signo		= signo;
-	ks->ex_vector		= evector;
-	ks->err_code		= ecode;
-	ks->kgdb_usethreadid	= 0;
-	ks->linux_regs		= regs;
-
-	if (kgdb_reenter_check(ks))
-		return 0; /* Ouch, double exception ! */
-	kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
-	ret = kgdb_cpu_enter(ks, regs);
-	kgdb_info[ks->cpu].exception_state &= ~DCPU_WANT_MASTER;
-	return ret;
-}
-
-int kgdb_nmicallback(int cpu, void *regs)
-{
-#ifdef CONFIG_SMP
-	struct kgdb_state kgdb_var;
-	struct kgdb_state *ks = &kgdb_var;
-
-	memset(ks, 0, sizeof(struct kgdb_state));
-	ks->cpu			= cpu;
-	ks->linux_regs		= regs;
-
-	if (!atomic_read(&cpu_in_kgdb[cpu]) &&
-	    atomic_read(&kgdb_active) != -1 &&
-	    atomic_read(&kgdb_active) != cpu) {
-		kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
-		kgdb_cpu_enter(ks, regs);
-		kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
-		return 0;
-	}
-#endif
-	return 1;
-}
-
-static void kgdb_console_write(struct console *co, const char *s,
-   unsigned count)
-{
-	unsigned long flags;
-
-	/* If we're debugging, or KGDB has not connected, don't try
-	 * and print. */
-	if (!kgdb_connected || atomic_read(&kgdb_active) != -1)
-		return;
-
-	local_irq_save(flags);
-	kgdb_msg_write(s, count);
-	local_irq_restore(flags);
-}
-
-static struct console kgdbcons = {
-	.name		= "kgdb",
-	.write		= kgdb_console_write,
-	.flags		= CON_PRINTBUFFER | CON_ENABLED,
-	.index		= -1,
-};
-
-#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_gdb(int key, struct tty_struct *tty)
-{
-	if (!kgdb_io_ops) {
-		printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
-		return;
-	}
-	if (!kgdb_connected)
-		printk(KERN_CRIT "Entering KGDB\n");
-
-	kgdb_breakpoint();
-}
-
-static struct sysrq_key_op sysrq_gdb_op = {
-	.handler	= sysrq_handle_gdb,
-	.help_msg	= "debug(G)",
-	.action_msg	= "DEBUG",
-};
-#endif
-
-static void kgdb_register_callbacks(void)
-{
-	if (!kgdb_io_module_registered) {
-		kgdb_io_module_registered = 1;
-		kgdb_arch_init();
-#ifdef CONFIG_MAGIC_SYSRQ
-		register_sysrq_key('g', &sysrq_gdb_op);
-#endif
-		if (kgdb_use_con && !kgdb_con_registered) {
-			register_console(&kgdbcons);
-			kgdb_con_registered = 1;
-		}
-	}
-}
-
-static void kgdb_unregister_callbacks(void)
-{
-	/*
-	 * When this routine is called KGDB should unregister from the
-	 * panic handler and clean up, making sure it is not handling any
-	 * break exceptions at the time.
-	 */
-	if (kgdb_io_module_registered) {
-		kgdb_io_module_registered = 0;
-		kgdb_arch_exit();
-#ifdef CONFIG_MAGIC_SYSRQ
-		unregister_sysrq_key('g', &sysrq_gdb_op);
-#endif
-		if (kgdb_con_registered) {
-			unregister_console(&kgdbcons);
-			kgdb_con_registered = 0;
-		}
-	}
-}
-
-static void kgdb_initial_breakpoint(void)
-{
-	kgdb_break_asap = 0;
-
-	printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
-	kgdb_breakpoint();
-}
-
-/**
- *	kgdb_register_io_module - register KGDB IO module
- *	@new_kgdb_io_ops: the io ops vector
- *
- *	Register it with the KGDB core.
- */
-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
-{
-	int err;
-
-	spin_lock(&kgdb_registration_lock);
-
-	if (kgdb_io_ops) {
-		spin_unlock(&kgdb_registration_lock);
-
-		printk(KERN_ERR "kgdb: Another I/O driver is already "
-				"registered with KGDB.\n");
-		return -EBUSY;
-	}
-
-	if (new_kgdb_io_ops->init) {
-		err = new_kgdb_io_ops->init();
-		if (err) {
-			spin_unlock(&kgdb_registration_lock);
-			return err;
-		}
-	}
-
-	kgdb_io_ops = new_kgdb_io_ops;
-
-	spin_unlock(&kgdb_registration_lock);
-
-	printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
-	       new_kgdb_io_ops->name);
-
-	/* Arm KGDB now. */
-	kgdb_register_callbacks();
-
-	if (kgdb_break_asap)
-		kgdb_initial_breakpoint();
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(kgdb_register_io_module);
-
-/**
- *	kgdb_unregister_io_module - unregister KGDB IO module
- *	@old_kgdb_io_ops: the io ops vector
- *
- *	Unregister it with the KGDB core.
- */
-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
-{
-	BUG_ON(kgdb_connected);
-
-	/*
-	 * KGDB is no longer able to communicate out, so
-	 * unregister our callbacks and reset state.
-	 */
-	kgdb_unregister_callbacks();
-
-	spin_lock(&kgdb_registration_lock);
-
-	WARN_ON_ONCE(kgdb_io_ops != old_kgdb_io_ops);
-	kgdb_io_ops = NULL;
-
-	spin_unlock(&kgdb_registration_lock);
-
-	printk(KERN_INFO
-		"kgdb: Unregistered I/O driver %s, debugger disabled.\n",
-		old_kgdb_io_ops->name);
-}
-EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
-
-/**
- * kgdb_breakpoint - generate breakpoint exception
- *
- * This function will generate a breakpoint exception.  It is used at the
- * beginning of a program to sync up with a debugger and can be used
- * otherwise as a quick means to stop program execution and "break" into
- * the debugger.
- */
-void kgdb_breakpoint(void)
-{
-	atomic_inc(&kgdb_setting_breakpoint);
-	wmb(); /* Sync point before breakpoint */
-	arch_kgdb_breakpoint();
-	wmb(); /* Sync point after breakpoint */
-	atomic_dec(&kgdb_setting_breakpoint);
-}
-EXPORT_SYMBOL_GPL(kgdb_breakpoint);
-
-static int __init opt_kgdb_wait(char *str)
-{
-	kgdb_break_asap = 1;
-
-	if (kgdb_io_module_registered)
-		kgdb_initial_breakpoint();
-
-	return 0;
-}
-
-early_param("kgdbwait", opt_kgdb_wait);
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 21fe3c4..0b624e7 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -138,7 +138,8 @@
 extern const void __stop_notes __attribute__((weak));
 #define	notes_size (&__stop_notes - &__start_notes)
 
-static ssize_t notes_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+static ssize_t notes_read(struct file *filp, struct kobject *kobj,
+			  struct bin_attribute *bin_attr,
 			  char *buf, loff_t off, size_t count)
 {
 	memcpy(buf, &__start_notes + off, count);
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index ec21304..5428679 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2711,6 +2711,8 @@
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
+struct lock_class_key __lockdep_no_validate__;
+
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
  * We maintain the dependency maps and validate the locking attempt:
@@ -2745,6 +2747,9 @@
 		return 0;
 	}
 
+	if (lock->key == &__lockdep_no_validate__)
+		check = 1;
+
 	if (!subclass)
 		class = lock->class_cache;
 	/*
diff --git a/kernel/module.c b/kernel/module.c
index e256458..3c4fc4b 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -77,6 +77,10 @@
 DEFINE_MUTEX(module_mutex);
 EXPORT_SYMBOL_GPL(module_mutex);
 static LIST_HEAD(modules);
+#ifdef CONFIG_KGDB_KDB
+struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
+#endif /* CONFIG_KGDB_KDB */
+
 
 /* Block module loading/unloading? */
 int modules_disabled = 0;
@@ -1182,7 +1186,7 @@
 	struct bin_attribute attrs[0];
 };
 
-static ssize_t module_notes_read(struct kobject *kobj,
+static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
 				 struct bin_attribute *bin_attr,
 				 char *buf, loff_t pos, size_t count)
 {
diff --git a/kernel/padata.c b/kernel/padata.c
index fd03513..b1c9857 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -29,7 +29,7 @@
 #include <linux/rcupdate.h>
 
 #define MAX_SEQ_NR INT_MAX - NR_CPUS
-#define MAX_OBJ_NUM 10000 * NR_CPUS
+#define MAX_OBJ_NUM 1000
 
 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
 {
@@ -88,7 +88,7 @@
 	local_bh_enable();
 }
 
-/*
+/**
  * padata_do_parallel - padata parallelization function
  *
  * @pinst: padata instance
@@ -152,6 +152,23 @@
 }
 EXPORT_SYMBOL(padata_do_parallel);
 
+/*
+ * padata_get_next - Get the next object that needs serialization.
+ *
+ * Return values are:
+ *
+ * A pointer to the control struct of the next object that needs
+ * serialization, if present in one of the percpu reorder queues.
+ *
+ * NULL, if all percpu reorder queues are empty.
+ *
+ * -EINPROGRESS, if the next object that needs serialization will
+ *  be parallel processed by another cpu and is not yet present in
+ *  the cpu's reorder queue.
+ *
+ * -ENODATA, if this cpu has to do the parallel processing for
+ *  the next object.
+ */
 static struct padata_priv *padata_get_next(struct parallel_data *pd)
 {
 	int cpu, num_cpus, empty, calc_seq_nr;
@@ -173,7 +190,7 @@
 
 		/*
 		 * Calculate the seq_nr of the object that should be
-		 * next in this queue.
+		 * next in this reorder queue.
 		 */
 		overrun = 0;
 		calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus)
@@ -231,7 +248,8 @@
 		goto out;
 	}
 
-	if (next_nr % num_cpus == next_queue->cpu_index) {
+	queue = per_cpu_ptr(pd->queue, smp_processor_id());
+	if (queue->cpu_index == next_queue->cpu_index) {
 		padata = ERR_PTR(-ENODATA);
 		goto out;
 	}
@@ -247,19 +265,40 @@
 	struct padata_queue *queue;
 	struct padata_instance *pinst = pd->pinst;
 
-try_again:
+	/*
+	 * We need to ensure that only one cpu can work on dequeueing of
+	 * the reorder queue at a time. Calculating in which percpu reorder
+	 * queue the next object will arrive takes some time. A spinlock
+	 * would be highly contended. Also it is not clear in which order
+	 * the objects arrive to the reorder queues. So a cpu could wait to
+	 * get the lock just to notice that there is nothing to do at the
+	 * moment. Therefore we use a trylock and let the holder of the lock
+	 * care for all the objects enqueued during the holdtime of the lock.
+	 */
 	if (!spin_trylock_bh(&pd->lock))
-		goto out;
+		return;
 
 	while (1) {
 		padata = padata_get_next(pd);
 
+		/*
+		 * All reorder queues are empty, or the next object that needs
+		 * serialization is parallel processed by another cpu and is
+		 * still on its way to the cpu's reorder queue, nothing to
+		 * do for now.
+		 */
 		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
 			break;
 
+		/*
+		 * This cpu has to do the parallel processing of the next
+		 * object. It's waiting in the cpu's parallelization queue,
+		 * so exit immediately.
+		 */
 		if (PTR_ERR(padata) == -ENODATA) {
+			del_timer(&pd->timer);
 			spin_unlock_bh(&pd->lock);
-			goto out;
+			return;
 		}
 
 		queue = per_cpu_ptr(pd->queue, padata->cb_cpu);
@@ -273,13 +312,27 @@
 
 	spin_unlock_bh(&pd->lock);
 
-	if (atomic_read(&pd->reorder_objects))
-		goto try_again;
+	/*
+	 * The next object that needs serialization might have arrived at
+	 * the reorder queues in the meantime; we will be called again
+	 * from the timer function if no one else cares for it.
+	 */
+	if (atomic_read(&pd->reorder_objects)
+			&& !(pinst->flags & PADATA_RESET))
+		mod_timer(&pd->timer, jiffies + HZ);
+	else
+		del_timer(&pd->timer);
 
-out:
 	return;
 }
 
+static void padata_reorder_timer(unsigned long arg)
+{
+	struct parallel_data *pd = (struct parallel_data *)arg;
+
+	padata_reorder(pd);
+}
+
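The trylock strategy described in the comment above can be illustrated outside the kernel: contenders that fail the trylock simply leave, trusting the current holder (or, in padata, the one-second timer) to drain whatever they enqueued in the meantime. A hedged pthreads sketch of that shape, not a model of padata itself:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reorder_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending;	/* objects waiting to be "serialized" */

static void reorder(void)
{
	/* Whoever holds the lock drains everything enqueued meanwhile. */
	if (pthread_mutex_trylock(&reorder_lock))
		return;	/* somebody else is already draining */
	while (pending > 0)
		pending--;
	pthread_mutex_unlock(&reorder_lock);
}

int main(void)
{
	pending = 3;
	reorder();
	printf("left over: %d\n", pending);
	return 0;
}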
 static void padata_serial_worker(struct work_struct *work)
 {
 	struct padata_queue *queue;
@@ -308,7 +361,7 @@
 	local_bh_enable();
 }
 
-/*
+/**
  * padata_do_serial - padata serialization function
  *
  * @padata: object to be serialized.
@@ -338,6 +391,7 @@
 }
 EXPORT_SYMBOL(padata_do_serial);
 
+/* Allocate and initialize the internal cpumask dependent resources. */
 static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
 					     const struct cpumask *cpumask)
 {
@@ -358,17 +412,15 @@
 	if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL))
 		goto err_free_queue;
 
-	for_each_possible_cpu(cpu) {
+	cpumask_and(pd->cpumask, cpumask, cpu_active_mask);
+
+	for_each_cpu(cpu, pd->cpumask) {
 		queue = per_cpu_ptr(pd->queue, cpu);
 
 		queue->pd = pd;
 
-		if (cpumask_test_cpu(cpu, cpumask)
-		    && cpumask_test_cpu(cpu, cpu_active_mask)) {
-			queue->cpu_index = cpu_index;
-			cpu_index++;
-		} else
-			queue->cpu_index = -1;
+		queue->cpu_index = cpu_index;
+		cpu_index++;
 
 		INIT_LIST_HEAD(&queue->reorder.list);
 		INIT_LIST_HEAD(&queue->parallel.list);
@@ -382,11 +434,10 @@
 		atomic_set(&queue->num_obj, 0);
 	}
 
-	cpumask_and(pd->cpumask, cpumask, cpu_active_mask);
-
 	num_cpus = cpumask_weight(pd->cpumask);
 	pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;
 
+	setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
 	atomic_set(&pd->seq_nr, -1);
 	atomic_set(&pd->reorder_objects, 0);
 	atomic_set(&pd->refcnt, 0);
@@ -410,6 +461,31 @@
 	kfree(pd);
 }
 
+/* Flush all objects out of the padata queues. */
+static void padata_flush_queues(struct parallel_data *pd)
+{
+	int cpu;
+	struct padata_queue *queue;
+
+	for_each_cpu(cpu, pd->cpumask) {
+		queue = per_cpu_ptr(pd->queue, cpu);
+		flush_work(&queue->pwork);
+	}
+
+	del_timer_sync(&pd->timer);
+
+	if (atomic_read(&pd->reorder_objects))
+		padata_reorder(pd);
+
+	for_each_cpu(cpu, pd->cpumask) {
+		queue = per_cpu_ptr(pd->queue, cpu);
+		flush_work(&queue->swork);
+	}
+
+	BUG_ON(atomic_read(&pd->refcnt) != 0);
+}
+
+/* Replace the internal control structure with a new one. */
 static void padata_replace(struct padata_instance *pinst,
 			   struct parallel_data *pd_new)
 {
@@ -421,17 +497,13 @@
 
 	synchronize_rcu();
 
-	while (atomic_read(&pd_old->refcnt) != 0)
-		yield();
-
-	flush_workqueue(pinst->wq);
-
+	padata_flush_queues(pd_old);
 	padata_free_pd(pd_old);
 
 	pinst->flags &= ~PADATA_RESET;
 }
 
-/*
+/**
  * padata_set_cpumask - set the cpumask that padata should use
  *
  * @pinst: padata instance
@@ -443,10 +515,10 @@
 	struct parallel_data *pd;
 	int err = 0;
 
-	might_sleep();
-
 	mutex_lock(&pinst->lock);
 
+	get_online_cpus();
+
 	pd = padata_alloc_pd(pinst, cpumask);
 	if (!pd) {
 		err = -ENOMEM;
@@ -458,6 +530,8 @@
 	padata_replace(pinst, pd);
 
 out:
+	put_online_cpus();
+
 	mutex_unlock(&pinst->lock);
 
 	return err;
@@ -479,7 +553,7 @@
 	return 0;
 }
 
-/*
+/**
  * padata_add_cpu - add a cpu to the padata cpumask
  *
  * @pinst: padata instance
@@ -489,12 +563,12 @@
 {
 	int err;
 
-	might_sleep();
-
 	mutex_lock(&pinst->lock);
 
+	get_online_cpus();
 	cpumask_set_cpu(cpu, pinst->cpumask);
 	err = __padata_add_cpu(pinst, cpu);
+	put_online_cpus();
 
 	mutex_unlock(&pinst->lock);
 
@@ -517,7 +591,7 @@
 	return 0;
 }
 
-/*
+/**
  * padata_remove_cpu - remove a cpu from the padata cpumask
  *
  * @pinst: padata instance
@@ -527,12 +601,12 @@
 {
 	int err;
 
-	might_sleep();
-
 	mutex_lock(&pinst->lock);
 
+	get_online_cpus();
 	cpumask_clear_cpu(cpu, pinst->cpumask);
 	err = __padata_remove_cpu(pinst, cpu);
+	put_online_cpus();
 
 	mutex_unlock(&pinst->lock);
 
@@ -540,38 +614,35 @@
 }
 EXPORT_SYMBOL(padata_remove_cpu);
 
-/*
+/**
  * padata_start - start the parallel processing
  *
  * @pinst: padata instance to start
  */
 void padata_start(struct padata_instance *pinst)
 {
-	might_sleep();
-
 	mutex_lock(&pinst->lock);
 	pinst->flags |= PADATA_INIT;
 	mutex_unlock(&pinst->lock);
 }
 EXPORT_SYMBOL(padata_start);
 
-/*
+/**
  * padata_stop - stop the parallel processing
  *
  * @pinst: padata instance to stop
  */
 void padata_stop(struct padata_instance *pinst)
 {
-	might_sleep();
-
 	mutex_lock(&pinst->lock);
 	pinst->flags &= ~PADATA_INIT;
 	mutex_unlock(&pinst->lock);
 }
 EXPORT_SYMBOL(padata_stop);
 
-static int __cpuinit padata_cpu_callback(struct notifier_block *nfb,
-					 unsigned long action, void *hcpu)
+#ifdef CONFIG_HOTPLUG_CPU
+static int padata_cpu_callback(struct notifier_block *nfb,
+			       unsigned long action, void *hcpu)
 {
 	int err;
 	struct padata_instance *pinst;
@@ -621,8 +692,9 @@
 
 	return NOTIFY_OK;
 }
+#endif
 
-/*
+/**
  * padata_alloc - allocate and initialize a padata instance
  *
  * @cpumask: cpumask that padata uses for parallelization
@@ -631,7 +703,6 @@
 struct padata_instance *padata_alloc(const struct cpumask *cpumask,
 				     struct workqueue_struct *wq)
 {
-	int err;
 	struct padata_instance *pinst;
 	struct parallel_data *pd;
 
@@ -639,6 +710,8 @@
 	if (!pinst)
 		goto err;
 
+	get_online_cpus();
+
 	pd = padata_alloc_pd(pinst, cpumask);
 	if (!pd)
 		goto err_free_inst;
@@ -654,31 +727,32 @@
 
 	pinst->flags = 0;
 
+#ifdef CONFIG_HOTPLUG_CPU
 	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
 	pinst->cpu_notifier.priority = 0;
-	err = register_hotcpu_notifier(&pinst->cpu_notifier);
-	if (err)
-		goto err_free_cpumask;
+	register_hotcpu_notifier(&pinst->cpu_notifier);
+#endif
+
+	put_online_cpus();
 
 	mutex_init(&pinst->lock);
 
 	return pinst;
 
-err_free_cpumask:
-	free_cpumask_var(pinst->cpumask);
 err_free_pd:
 	padata_free_pd(pd);
 err_free_inst:
 	kfree(pinst);
+	put_online_cpus();
 err:
 	return NULL;
 }
 EXPORT_SYMBOL(padata_alloc);
 
-/*
+/**
  * padata_free - free a padata instance
  *
- * @ padata_inst: padata instance to free
+ * @padata_inst: padata instance to free
  */
 void padata_free(struct padata_instance *pinst)
 {
@@ -686,10 +760,13 @@
 
 	synchronize_rcu();
 
-	while (atomic_read(&pinst->pd->refcnt) != 0)
-		yield();
-
+#ifdef CONFIG_HOTPLUG_CPU
 	unregister_hotcpu_notifier(&pinst->cpu_notifier);
+#endif
+	get_online_cpus();
+	padata_flush_queues(pinst->pd);
+	put_online_cpus();
+
 	padata_free_pd(pinst->pd);
 	free_cpumask_var(pinst->cpumask);
 	kfree(pinst);
diff --git a/kernel/printk.c b/kernel/printk.c
index 75077ad..444b770 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -33,6 +33,7 @@
 #include <linux/bootmem.h>
 #include <linux/syscalls.h>
 #include <linux/kexec.h>
+#include <linux/kdb.h>
 #include <linux/ratelimit.h>
 #include <linux/kmsg_dump.h>
 #include <linux/syslog.h>
@@ -413,6 +414,22 @@
 	return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
 }
 
+#ifdef	CONFIG_KGDB_KDB
+/* kdb dmesg command needs access to the syslog buffer.  do_syslog()
+ * uses locks so it cannot be used during debugging.  Just tell kdb
+ * where the start and end of the physical and logical logs are.  This
+ * is equivalent to do_syslog(3).
+ */
+void kdb_syslog_data(char *syslog_data[4])
+{
+	syslog_data[0] = log_buf;
+	syslog_data[1] = log_buf + log_buf_len;
+	syslog_data[2] = log_buf + log_end -
+		(logged_chars < log_buf_len ? logged_chars : log_buf_len);
+	syslog_data[3] = log_buf + log_end;
+}
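With those four pointers, the kdb dmesg command can size the log without taking any locks: [0]..[1] bound the physical buffer and [2]..[3] bound the logically valid region, just as do_syslog(3) would report. A small sketch of the arithmetic (hypothetical helper, plain C):

#include <stdio.h>

static void show_log_window(char *syslog_data[4])
{
	long buf_len = syslog_data[1] - syslog_data[0];	/* physical size */
	long logged  = syslog_data[3] - syslog_data[2];	/* valid bytes  */

	printf("ring buffer %ld bytes, %ld bytes currently logged\n",
	       buf_len, logged);
}

int main(void)
{
	static char buf[16];
	char *data[4] = { buf, buf + sizeof(buf), buf + 4, buf + 12 };

	show_log_window(data);
	return 0;
}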
+#endif	/* CONFIG_KGDB_KDB */
+
 /*
  * Call the console drivers on a range of log_buf
  */
@@ -586,6 +603,14 @@
 	va_list args;
 	int r;
 
+#ifdef CONFIG_KGDB_KDB
+	if (unlikely(kdb_trap_printk)) {
+		va_start(args, fmt);
+		r = vkdb_printf(fmt, args);
+		va_end(args);
+		return r;
+	}
+#endif
 	va_start(args, fmt);
 	r = vprintk(fmt, args);
 	va_end(args);
diff --git a/kernel/relay.c b/kernel/relay.c
index 3d97f28..4268287 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1231,8 +1231,8 @@
 	size_t read_subbuf = read_start / subbuf_size;
 	size_t padding = rbuf->padding[read_subbuf];
 	size_t nonpad_end = read_subbuf * subbuf_size + subbuf_size - padding;
-	struct page *pages[PIPE_BUFFERS];
-	struct partial_page partial[PIPE_BUFFERS];
+	struct page *pages[PIPE_DEF_BUFFERS];
+	struct partial_page partial[PIPE_DEF_BUFFERS];
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.nr_pages = 0,
@@ -1245,6 +1245,8 @@
 
 	if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
 		return 0;
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
 
 	/*
 	 * Adjust read len, if longer than what is available
@@ -1255,7 +1257,7 @@
 	subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT;
 	pidx = (read_start / PAGE_SIZE) % subbuf_pages;
 	poff = read_start & ~PAGE_MASK;
-	nr_pages = min_t(unsigned int, subbuf_pages, PIPE_BUFFERS);
+	nr_pages = min_t(unsigned int, subbuf_pages, pipe->buffers);
 
 	for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) {
 		unsigned int this_len, this_end, private;
@@ -1289,16 +1291,19 @@
 		}
 	}
 
+	ret = 0;
 	if (!spd.nr_pages)
-		return 0;
+		goto out;
 
 	ret = *nonpad_ret = splice_to_pipe(pipe, &spd);
 	if (ret < 0 || ret < total_len)
-		return ret;
+		goto out;
 
         if (read_start + ret == nonpad_end)
                 ret += padding;
 
+out:
+	splice_shrink_spd(pipe, &spd);
         return ret;
 }
 
diff --git a/kernel/sched.c b/kernel/sched.c
index d9c0368..054a601 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7759,9 +7759,9 @@
 
 #endif /* CONFIG_MAGIC_SYSRQ */
 
-#ifdef CONFIG_IA64
+#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
 /*
- * These functions are only useful for the IA64 MCA handling.
+ * These functions are only useful for the IA64 MCA handling, or kdb.
  *
  * They can only be called when the whole system has been
  * stopped - every CPU needs to be quiescent, and no scheduling
@@ -7781,6 +7781,9 @@
 	return cpu_curr(cpu);
 }
 
+#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
+
+#ifdef CONFIG_IA64
 /**
  * set_curr_task - set the current task for a given cpu.
  * @cpu: the processor in question.
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 5b49613..906a0f7 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -41,6 +41,7 @@
 	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
 					* (NSEC_PER_SEC / HZ);
 }
+EXPORT_SYMBOL_GPL(sched_clock);
 
 static __read_mostly int sched_clock_running;
 
diff --git a/kernel/signal.c b/kernel/signal.c
index dbd7fe0..825a3f2 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2735,3 +2735,43 @@
 {
 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
 }
+
+#ifdef CONFIG_KGDB_KDB
+#include <linux/kdb.h>
+/*
+ * kdb_send_sig_info - Allows kdb to send signals without exposing
+ * signal internals.  This function checks if the required locks are
+ * available before calling the main signal code, to avoid kdb
+ * deadlocks.
+ */
+void
+kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
+{
+	static struct task_struct *kdb_prev_t;
+	int sig, new_t;
+	if (!spin_trylock(&t->sighand->siglock)) {
+		kdb_printf("Can't do kill command now.\n"
+			   "The sigmask lock is held somewhere else in "
+			   "kernel, try again later\n");
+		return;
+	}
+	spin_unlock(&t->sighand->siglock);
+	new_t = kdb_prev_t != t;
+	kdb_prev_t = t;
+	if (t->state != TASK_RUNNING && new_t) {
+		kdb_printf("Process is not RUNNING, sending a signal from "
+			   "kdb risks deadlock\n"
+			   "on the run queue locks. "
+			   "The signal has _not_ been sent.\n"
+			   "Reissue the kill command if you want to risk "
+			   "the deadlock.\n");
+		return;
+	}
+	sig = info->si_signo;
+	if (send_sig_info(sig, info, t))
+		kdb_printf("Fail to deliver Signal %d to process %d.\n",
+			   sig, t->pid);
+	else
+		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
+}
+#endif	/* CONFIG_KGDB_KDB */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b125830..4c93486 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -52,6 +52,7 @@
 #include <linux/slow-work.h>
 #include <linux/perf_event.h>
 #include <linux/kprobes.h>
+#include <linux/pipe_fs_i.h>
 
 #include <asm/uaccess.h>
 #include <asm/processor.h>
@@ -1444,6 +1445,14 @@
 		.child		= binfmt_misc_table,
 	},
 #endif
+	{
+		.procname	= "pipe-max-pages",
+		.data		= &pipe_max_pages,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= &two,
+	},
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
@@ -2083,20 +2092,20 @@
 
 #define TMPBUFLEN 22
 /**
- * proc_get_long - reads an ASCII formated integer from a user buffer
+ * proc_get_long - reads an ASCII formatted integer from a user buffer
  *
- * @buf - a kernel buffer
- * @size - size of the kernel buffer
- * @val - this is where the number will be stored
- * @neg - set to %TRUE if number is negative
- * @perm_tr - a vector which contains the allowed trailers
- * @perm_tr_len - size of the perm_tr vector
- * @tr - pointer to store the trailer character
+ * @buf: a kernel buffer
+ * @size: size of the kernel buffer
+ * @val: this is where the number will be stored
+ * @neg: set to %TRUE if number is negative
+ * @perm_tr: a vector which contains the allowed trailers
+ * @perm_tr_len: size of the perm_tr vector
+ * @tr: pointer to store the trailer character
  *
- * In case of success 0 is returned and buf and size are updated with
- * the amount of bytes read. If tr is non NULL and a trailing
- * character exist (size is non zero after returning from this
- * function) tr is updated with the trailing character.
+ * In case of success %0 is returned and @buf and @size are updated with
+ * the amount of bytes read. If @tr is non-NULL and a trailing
+ * character exists (size is non-zero after returning from this
+ * function), @tr is updated with the trailing character.
  */
 static int proc_get_long(char **buf, size_t *size,
 			  unsigned long *val, bool *neg,
@@ -2147,15 +2156,15 @@
 }
 
 /**
- * proc_put_long - coverts an integer to a decimal ASCII formated string
+ * proc_put_long - converts an integer to a decimal ASCII formatted string
  *
- * @buf - the user buffer
- * @size - the size of the user buffer
- * @val - the integer to be converted
- * @neg - sign of the number, %TRUE for negative
+ * @buf: the user buffer
+ * @size: the size of the user buffer
+ * @val: the integer to be converted
+ * @neg: sign of the number, %TRUE for negative
  *
- * In case of success 0 is returned and buf and size are updated with
- * the amount of bytes read.
+ * In case of success %0 is returned and @buf and @size are updated with
+ * the amount of bytes written.
  */
 static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
 			  bool neg)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 756d728..8a76339 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3309,12 +3309,12 @@
 					size_t len,
 					unsigned int flags)
 {
-	struct page *pages[PIPE_BUFFERS];
-	struct partial_page partial[PIPE_BUFFERS];
+	struct page *pages_def[PIPE_DEF_BUFFERS];
+	struct partial_page partial_def[PIPE_DEF_BUFFERS];
 	struct trace_iterator *iter = filp->private_data;
 	struct splice_pipe_desc spd = {
-		.pages		= pages,
-		.partial	= partial,
+		.pages		= pages_def,
+		.partial	= partial_def,
 		.nr_pages	= 0, /* This gets updated below. */
 		.flags		= flags,
 		.ops		= &tracing_pipe_buf_ops,
@@ -3325,6 +3325,9 @@
 	size_t rem;
 	unsigned int i;
 
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
+
 	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
 	if (unlikely(old_tracer != current_trace && current_trace)) {
@@ -3355,23 +3358,23 @@
 	trace_access_lock(iter->cpu_file);
 
 	/* Fill as many pages as possible. */
-	for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
-		pages[i] = alloc_page(GFP_KERNEL);
-		if (!pages[i])
+	for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
+		spd.pages[i] = alloc_page(GFP_KERNEL);
+		if (!spd.pages[i])
 			break;
 
 		rem = tracing_fill_pipe_page(rem, iter);
 
 		/* Copy the data into the page, so we can start over. */
 		ret = trace_seq_to_buffer(&iter->seq,
-					  page_address(pages[i]),
+					  page_address(spd.pages[i]),
 					  iter->seq.len);
 		if (ret < 0) {
-			__free_page(pages[i]);
+			__free_page(spd.pages[i]);
 			break;
 		}
-		partial[i].offset = 0;
-		partial[i].len = iter->seq.len;
+		spd.partial[i].offset = 0;
+		spd.partial[i].len = iter->seq.len;
 
 		trace_seq_init(&iter->seq);
 	}
@@ -3382,12 +3385,14 @@
 
 	spd.nr_pages = i;
 
-	return splice_to_pipe(pipe, &spd);
+	ret = splice_to_pipe(pipe, &spd);
+out:
+	splice_shrink_spd(pipe, &spd);
+	return ret;
 
 out_err:
 	mutex_unlock(&iter->mutex);
-
-	return ret;
+	goto out;
 }
 
 static ssize_t
@@ -3786,11 +3791,11 @@
 			    unsigned int flags)
 {
 	struct ftrace_buffer_info *info = file->private_data;
-	struct partial_page partial[PIPE_BUFFERS];
-	struct page *pages[PIPE_BUFFERS];
+	struct partial_page partial_def[PIPE_DEF_BUFFERS];
+	struct page *pages_def[PIPE_DEF_BUFFERS];
 	struct splice_pipe_desc spd = {
-		.pages		= pages,
-		.partial	= partial,
+		.pages		= pages_def,
+		.partial	= partial_def,
 		.flags		= flags,
 		.ops		= &buffer_pipe_buf_ops,
 		.spd_release	= buffer_spd_release,
@@ -3799,22 +3804,28 @@
 	int entries, size, i;
 	size_t ret;
 
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
+
 	if (*ppos & (PAGE_SIZE - 1)) {
 		WARN_ONCE(1, "Ftrace: previous read must page-align\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	if (len & (PAGE_SIZE - 1)) {
 		WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
-		if (len < PAGE_SIZE)
-			return -EINVAL;
+		if (len < PAGE_SIZE) {
+			ret = -EINVAL;
+			goto out;
+		}
 		len &= PAGE_MASK;
 	}
 
 	trace_access_lock(info->cpu);
 	entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
 
-	for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
+	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
 		struct page *page;
 		int r;
 
@@ -3869,11 +3880,12 @@
 		else
 			ret = 0;
 		/* TODO: block */
-		return ret;
+		goto out;
 	}
 
 	ret = splice_to_pipe(pipe, &spd);
-
+	splice_shrink_spd(pipe, &spd);
+out:
 	return ret;
 }
 
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 076c7c8..b2d70d3 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -54,8 +54,8 @@
 #endif
 	/* tgcred will be cleared in our caller bc CLONE_THREAD won't be set */
 
-	/* alloc_uid() incremented the userns refcount.  Just set it to 1 */
-	kref_set(&ns->kref, 1);
+	/* root_user holds a reference to ns, our reference can be dropped */
+	put_user_ns(ns);
 
 	return 0;
 }
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 9b5d1d7..43cb93f 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -3,7 +3,7 @@
 	bool
 
 menuconfig KGDB
-	bool "KGDB: kernel debugging with remote gdb"
+	bool "KGDB: kernel debugger"
 	depends on HAVE_ARCH_KGDB
 	depends on DEBUG_KERNEL && EXPERIMENTAL
 	help
@@ -57,4 +57,26 @@
 	  information about other strings you could use beyond the
 	  default of V1F100.
 
+config KGDB_LOW_LEVEL_TRAP
+       bool "KGDB: Allow debugging with traps in notifiers"
+       depends on X86 || MIPS
+       default n
+       help
+         This will add an extra call back to kgdb for the breakpoint
+         exception handler, which will allow kgdb to step
+         through a notify handler.
+
+config KGDB_KDB
+	bool "KGDB_KDB: include kdb frontend for kgdb"
+	default n
+	help
+	  KDB frontend for kernel
+
+config KDB_KEYBOARD
+	bool "KGDB_KDB: keyboard as input device"
+	depends on VT && KGDB_KDB
+	default n
+	help
+	  KDB can use a PS/2 type keyboard as an input device
+
 endif # KGDB
diff --git a/lib/kobject.c b/lib/kobject.c
index 8115eb1..f07c572 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -850,6 +850,121 @@
 }
 EXPORT_SYMBOL_GPL(kset_create_and_add);
 
+
+static DEFINE_SPINLOCK(kobj_ns_type_lock);
+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
+
+int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
+{
+	enum kobj_ns_type type = ops->type;
+	int error;
+
+	spin_lock(&kobj_ns_type_lock);
+
+	error = -EINVAL;
+	if (type >= KOBJ_NS_TYPES)
+		goto out;
+
+	error = -EINVAL;
+	if (type <= KOBJ_NS_TYPE_NONE)
+		goto out;
+
+	error = -EBUSY;
+	if (kobj_ns_ops_tbl[type])
+		goto out;
+
+	error = 0;
+	kobj_ns_ops_tbl[type] = ops;
+
+out:
+	spin_unlock(&kobj_ns_type_lock);
+	return error;
+}
+
+int kobj_ns_type_registered(enum kobj_ns_type type)
+{
+	int registered = 0;
+
+	spin_lock(&kobj_ns_type_lock);
+	if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES))
+		registered = kobj_ns_ops_tbl[type] != NULL;
+	spin_unlock(&kobj_ns_type_lock);
+
+	return registered;
+}
+
+const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent)
+{
+	const struct kobj_ns_type_operations *ops = NULL;
+
+	if (parent && parent->ktype->child_ns_type)
+		ops = parent->ktype->child_ns_type(parent);
+
+	return ops;
+}
+
+const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
+{
+	return kobj_child_ns_ops(kobj->parent);
+}
+
+
+const void *kobj_ns_current(enum kobj_ns_type type)
+{
+	const void *ns = NULL;
+
+	spin_lock(&kobj_ns_type_lock);
+	if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
+	    kobj_ns_ops_tbl[type])
+		ns = kobj_ns_ops_tbl[type]->current_ns();
+	spin_unlock(&kobj_ns_type_lock);
+
+	return ns;
+}
+
+const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
+{
+	const void *ns = NULL;
+
+	spin_lock(&kobj_ns_type_lock);
+	if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
+	    kobj_ns_ops_tbl[type])
+		ns = kobj_ns_ops_tbl[type]->netlink_ns(sk);
+	spin_unlock(&kobj_ns_type_lock);
+
+	return ns;
+}
+
+const void *kobj_ns_initial(enum kobj_ns_type type)
+{
+	const void *ns = NULL;
+
+	spin_lock(&kobj_ns_type_lock);
+	if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
+	    kobj_ns_ops_tbl[type])
+		ns = kobj_ns_ops_tbl[type]->initial_ns();
+	spin_unlock(&kobj_ns_type_lock);
+
+	return ns;
+}
+
+/*
+ * kobj_ns_exit - invalidate a namespace tag
+ *
+ * @type: the namespace type (i.e. KOBJ_NS_TYPE_NET)
+ * @ns: the actual namespace being invalidated
+ *
+ * This is called when a tag is no longer valid.  For instance,
+ * when a network namespace exits, it uses this helper to
+ * make sure no sb's sysfs_info points to the now-invalidated
+ * netns.
+ */
+void kobj_ns_exit(enum kobj_ns_type type, const void *ns)
+{
+	sysfs_exit_ns(type, ns);
+}
+
+
 EXPORT_SYMBOL(kobject_get);
 EXPORT_SYMBOL(kobject_put);
 EXPORT_SYMBOL(kobject_del);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 7b48d44..59c1551 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -19,18 +19,24 @@
 #include <linux/kobject.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-
+#include <linux/user_namespace.h>
 #include <linux/socket.h>
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
 #include <net/sock.h>
+#include <net/net_namespace.h>
 
 
 u64 uevent_seqnum;
 char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
 static DEFINE_SPINLOCK(sequence_lock);
-#if defined(CONFIG_NET)
-static struct sock *uevent_sock;
+#ifdef CONFIG_NET
+struct uevent_sock {
+	struct list_head list;
+	struct sock *sk;
+};
+static LIST_HEAD(uevent_sock_list);
+static DEFINE_MUTEX(uevent_sock_mutex);
 #endif
 
 /* the strings here must match the enum in include/linux/kobject.h */
@@ -77,6 +83,37 @@
 	return ret;
 }
 
+static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
+{
+	struct kobject *kobj = data;
+	const struct kobj_ns_type_operations *ops;
+
+	ops = kobj_ns_ops(kobj);
+	if (ops) {
+		const void *sock_ns, *ns;
+		ns = kobj->ktype->namespace(kobj);
+		sock_ns = ops->netlink_ns(dsk);
+		return sock_ns != ns;
+	}
+
+	return 0;
+}
+
+static int kobj_usermode_filter(struct kobject *kobj)
+{
+	const struct kobj_ns_type_operations *ops;
+
+	ops = kobj_ns_ops(kobj);
+	if (ops) {
+		const void *init_ns, *ns;
+		ns = kobj->ktype->namespace(kobj);
+		init_ns = ops->initial_ns();
+		return ns != init_ns;
+	}
+
+	return 0;
+}
+
 /**
  * kobject_uevent_env - send an uevent with environmental data
  *
@@ -100,6 +137,9 @@
 	u64 seq;
 	int i = 0;
 	int retval = 0;
+#ifdef CONFIG_NET
+	struct uevent_sock *ue_sk;
+#endif
 
 	pr_debug("kobject: '%s' (%p): %s\n",
 		 kobject_name(kobj), kobj, __func__);
@@ -211,7 +251,9 @@
 
 #if defined(CONFIG_NET)
 	/* send netlink message */
-	if (uevent_sock) {
+	mutex_lock(&uevent_sock_mutex);
+	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
+		struct sock *uevent_sock = ue_sk->sk;
 		struct sk_buff *skb;
 		size_t len;
 
@@ -233,18 +275,21 @@
 			}
 
 			NETLINK_CB(skb).dst_group = 1;
-			retval = netlink_broadcast(uevent_sock, skb, 0, 1,
-						   GFP_KERNEL);
+			retval = netlink_broadcast_filtered(uevent_sock, skb,
+							    0, 1, GFP_KERNEL,
+							    kobj_bcast_filter,
+							    kobj);
 			/* ENOBUFS should be handled in userspace */
 			if (retval == -ENOBUFS)
 				retval = 0;
 		} else
 			retval = -ENOMEM;
 	}
+	mutex_unlock(&uevent_sock_mutex);
 #endif
 
 	/* call uevent_helper, usually only enabled during early boot */
-	if (uevent_helper[0]) {
+	if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
 		char *argv [3];
 
 		argv [0] = uevent_helper;
@@ -320,18 +365,58 @@
 EXPORT_SYMBOL_GPL(add_uevent_var);
 
 #if defined(CONFIG_NET)
-static int __init kobject_uevent_init(void)
+static int uevent_net_init(struct net *net)
 {
-	uevent_sock = netlink_kernel_create(&init_net, NETLINK_KOBJECT_UEVENT,
-					    1, NULL, NULL, THIS_MODULE);
-	if (!uevent_sock) {
+	struct uevent_sock *ue_sk;
+
+	ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
+	if (!ue_sk)
+		return -ENOMEM;
+
+	ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT,
+					  1, NULL, NULL, THIS_MODULE);
+	if (!ue_sk->sk) {
 		printk(KERN_ERR
 		       "kobject_uevent: unable to create netlink socket!\n");
 		return -ENODEV;
 	}
-	netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
+	mutex_lock(&uevent_sock_mutex);
+	list_add_tail(&ue_sk->list, &uevent_sock_list);
+	mutex_unlock(&uevent_sock_mutex);
 	return 0;
 }
 
+static void uevent_net_exit(struct net *net)
+{
+	struct uevent_sock *ue_sk;
+
+	mutex_lock(&uevent_sock_mutex);
+	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
+		if (sock_net(ue_sk->sk) == net)
+			goto found;
+	}
+	mutex_unlock(&uevent_sock_mutex);
+	return;
+
+found:
+	list_del(&ue_sk->list);
+	mutex_unlock(&uevent_sock_mutex);
+
+	netlink_kernel_release(ue_sk->sk);
+	kfree(ue_sk);
+}
+
+static struct pernet_operations uevent_net_ops = {
+	.init	= uevent_net_init,
+	.exit	= uevent_net_exit,
+};
+
+static int __init kobject_uevent_init(void)
+{
+	netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
+	return register_pernet_subsys(&uevent_net_ops);
+}
+
+
 postcore_initcall(kobject_uevent_init);
 #endif
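Two things happen in kobject_uevent.c: broadcasts now go through netlink_broadcast_filtered() with kobj_bcast_filter() deciding, per destination socket, whether the listener's network namespace matches the kobject's, and a per-net list of uevent sockets replaces the single global socket. A small userspace sketch of the per-subscriber filter callback idea follows; subscriber, bcast_filter_t and ns_filter are made-up names, not the netlink API.

#include <stdio.h>

struct subscriber {
	const void *ns;		/* stand-in for the socket's struct net */
	const char *name;
};

/* Return nonzero to skip this subscriber, mirroring kobj_bcast_filter(). */
typedef int (*bcast_filter_t)(const struct subscriber *sub, void *data);

static void broadcast(struct subscriber *subs, int n, const char *msg,
		      bcast_filter_t filter, void *data)
{
	for (int i = 0; i < n; i++) {
		if (filter && filter(&subs[i], data))
			continue;	/* wrong namespace: drop silently */
		printf("deliver to %s: %s\n", subs[i].name, msg);
	}
}

/* data is the namespace tag of the object that generated the event. */
static int ns_filter(const struct subscriber *sub, void *data)
{
	return sub->ns != data;
}

int main(void)
{
	int ns_a, ns_b;			/* stand-ins for struct net pointers */
	struct subscriber subs[] = {
		{ &ns_a, "listener-in-ns-a" },
		{ &ns_b, "listener-in-ns-b" },
	};

	broadcast(subs, 2, "add eth0", ns_filter, &ns_a);
	return 0;
}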
diff --git a/lib/kref.c b/lib/kref.c
index 6d19f69..d3d227a 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -16,23 +16,13 @@
 #include <linux/slab.h>
 
 /**
- * kref_set - initialize object and set refcount to requested number.
- * @kref: object in question.
- * @num: initial reference counter
- */
-void kref_set(struct kref *kref, int num)
-{
-	atomic_set(&kref->refcount, num);
-	smp_mb();
-}
-
-/**
  * kref_init - initialize object.
  * @kref: object in question.
  */
 void kref_init(struct kref *kref)
 {
-	kref_set(kref, 1);
+	atomic_set(&kref->refcount, 1);
+	smp_mb();
 }
 
 /**
@@ -72,7 +62,6 @@
 	return 0;
 }
 
-EXPORT_SYMBOL(kref_set);
 EXPORT_SYMBOL(kref_init);
 EXPORT_SYMBOL(kref_get);
 EXPORT_SYMBOL(kref_put);
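With kref_set() gone, a kref can only ever start at 1 via kref_init(). A userspace analogue of the kref lifecycle using C11 atomics is sketched below; the function names mirror the kernel API but the bodies are illustrative, not the kernel implementation.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct kref { atomic_int refcount; };

/* Objects always start at a count of 1; there is no kref_set(). */
static void kref_init(struct kref *kref)
{
	atomic_store(&kref->refcount, 1);
}

static void kref_get(struct kref *kref)
{
	atomic_fetch_add(&kref->refcount, 1);
}

/* Returns 1 and calls release() when the last reference is dropped. */
static int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
	if (atomic_fetch_sub(&kref->refcount, 1) == 1) {
		release(kref);
		return 1;
	}
	return 0;
}

struct widget { struct kref ref; };

static void widget_release(struct kref *kref)
{
	/* container_of() in the kernel; a plain cast works here because
	 * ref is the first member of struct widget. */
	free((struct widget *)kref);
}

int main(void)
{
	struct widget *w = malloc(sizeof(*w));

	if (!w)
		return 1;
	kref_init(&w->ref);
	kref_get(&w->ref);
	kref_put(&w->ref, widget_release);	/* 2 -> 1, still alive */
	kref_put(&w->ref, widget_release);	/* 1 -> 0, freed */
	return 0;
}

Starting every object at exactly 1 keeps get/put pairs balanced by construction, which is presumably why the arbitrary-count initializer was dropped.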
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 707d0dc..660a87a 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -48,7 +48,6 @@
 
 static int bdi_sync_supers(void *);
 static void sync_supers_timer_fn(unsigned long);
-static void arm_supers_timer(void);
 
 static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);
 
@@ -252,7 +251,7 @@
 
 	init_timer(&sync_supers_timer);
 	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
-	arm_supers_timer();
+	bdi_arm_supers_timer();
 
 	err = bdi_init(&default_backing_dev_info);
 	if (!err)
@@ -374,10 +373,13 @@
 	return 0;
 }
 
-static void arm_supers_timer(void)
+void bdi_arm_supers_timer(void)
 {
 	unsigned long next;
 
+	if (!dirty_writeback_interval)
+		return;
+
 	next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
 	mod_timer(&sync_supers_timer, round_jiffies_up(next));
 }
@@ -385,7 +387,7 @@
 static void sync_supers_timer_fn(unsigned long unused)
 {
 	wake_up_process(sync_supers_tsk);
-	arm_supers_timer();
+	bdi_arm_supers_timer();
 }
 
 static int bdi_forker_task(void *ptr)
@@ -428,7 +430,10 @@
 
 			spin_unlock_bh(&bdi_lock);
 			wait = msecs_to_jiffies(dirty_writeback_interval * 10);
-			schedule_timeout(wait);
+			if (wait)
+				schedule_timeout(wait);
+			else
+				schedule();
 			try_to_freeze();
 			continue;
 		}
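The writeback changes adopt the convention that a dirty_writeback_interval of 0 means "periodic writeback disabled": bdi_arm_supers_timer() returns without arming the timer, and the forker task blocks in schedule() instead of sleeping for zero jiffies. A tiny sketch of that zero-means-disabled convention, with writeback_interval_ms standing in for the sysctl:

#include <stdio.h>

/* 0 means "periodic writeback disabled", the convention adopted for
 * dirty_writeback_interval in the hunks above. */
static unsigned int writeback_interval_ms;

static void arm_periodic_timer(void)
{
	if (!writeback_interval_ms) {
		printf("interval is 0: timer left unarmed\n");
		return;
	}
	printf("timer armed for %u ms\n", writeback_interval_ms);
}

int main(void)
{
	writeback_interval_ms = 5000;
	arm_periodic_timer();	/* normal case: periodic wakeups */

	writeback_interval_ms = 0;
	arm_periodic_timer();	/* sysctl set to 0: no rearm; the worker
				 * would block (schedule()) instead of
				 * spinning on schedule_timeout(0) */
	return 0;
}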
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0b19943..b289310 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -597,7 +597,7 @@
 	    (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
 			       + global_page_state(NR_UNSTABLE_NFS))
 					  > background_thresh)))
-		bdi_start_writeback(bdi, NULL, 0);
+		bdi_start_writeback(bdi, NULL, 0, 0);
 }
 
 void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -683,10 +683,6 @@
         }
 }
 
-static void laptop_timer_fn(unsigned long unused);
-
-static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
-
 /*
  * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
  */
@@ -694,24 +690,24 @@
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
 	proc_dointvec(table, write, buffer, length, ppos);
+	bdi_arm_supers_timer();
 	return 0;
 }
 
-static void do_laptop_sync(struct work_struct *work)
+#ifdef CONFIG_BLOCK
+void laptop_mode_timer_fn(unsigned long data)
 {
-	wakeup_flusher_threads(0);
-	kfree(work);
-}
+	struct request_queue *q = (struct request_queue *)data;
+	int nr_pages = global_page_state(NR_FILE_DIRTY) +
+		global_page_state(NR_UNSTABLE_NFS);
 
-static void laptop_timer_fn(unsigned long unused)
-{
-	struct work_struct *work;
+	/*
+	 * We want to write everything out, not just down to the dirty
+	 * threshold
+	 */
 
-	work = kmalloc(sizeof(*work), GFP_ATOMIC);
-	if (work) {
-		INIT_WORK(work, do_laptop_sync);
-		schedule_work(work);
-	}
+	if (bdi_has_dirty_io(&q->backing_dev_info))
+		bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages, 0);
 }
 
 /*
@@ -719,9 +715,9 @@
  * of all dirty data a few seconds from now.  If the flush is already scheduled
  * then push it back - the user is still using the disk.
  */
-void laptop_io_completion(void)
+void laptop_io_completion(struct backing_dev_info *info)
 {
-	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
+	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
 }
 
 /*
@@ -731,8 +727,16 @@
  */
 void laptop_sync_completion(void)
 {
-	del_timer(&laptop_mode_wb_timer);
+	struct backing_dev_info *bdi;
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
+		del_timer(&bdi->laptop_mode_wb_timer);
+
+	rcu_read_unlock();
 }
+#endif
 
 /*
  * If ratelimit_pages is too high then we can get into dirty-data overload
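laptop_io_completion() now takes the backing_dev_info and rearms a per-bdi timer, so every I/O completion pushes the eventual flush further out, and laptop_sync_completion() walks bdi_list to cancel all of the timers after an explicit sync. A userspace sketch of that debounce-by-rearming pattern follows; device_state, LAPTOP_MODE_DELAY and the function bodies are illustrative stand-ins, not the mm code.

#include <stdio.h>
#include <time.h>

/* One deadline per device, like the per-bdi laptop_mode_wb_timer. */
struct device_state {
	const char *name;
	time_t flush_deadline;		/* 0 = no flush pending */
};

#define LAPTOP_MODE_DELAY 30		/* seconds, stand-in for laptop_mode */

/* Called on every I/O completion: push the flush further out, so the
 * disk only spins up for writeback once the device goes idle. */
static void laptop_io_completion(struct device_state *dev)
{
	dev->flush_deadline = time(NULL) + LAPTOP_MODE_DELAY;
}

/* Called after an explicit sync: nothing left to flush, cancel timers. */
static void laptop_sync_completion(struct device_state *devs, int n)
{
	for (int i = 0; i < n; i++)
		devs[i].flush_deadline = 0;
}

int main(void)
{
	struct device_state devs[] = { { "sda", 0 }, { "sdb", 0 } };

	laptop_io_completion(&devs[0]);
	printf("%s flush at +%lds\n", devs[0].name,
	       (long)(devs[0].flush_deadline - time(NULL)));
	laptop_sync_completion(devs, 2);
	return 0;
}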
diff --git a/mm/swapfile.c b/mm/swapfile.c
index f5ccc47..03aa2d5 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -139,7 +139,8 @@
 	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
 	if (nr_blocks) {
 		err = blkdev_issue_discard(si->bdev, start_block,
-				nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER);
+				nr_blocks, GFP_KERNEL,
+				BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
 		if (err)
 			return err;
 		cond_resched();
@@ -150,7 +151,8 @@
 		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 
 		err = blkdev_issue_discard(si->bdev, start_block,
-				nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER);
+				nr_blocks, GFP_KERNEL,
+				BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
 		if (err)
 			break;
 
@@ -189,7 +191,8 @@
 			start_block <<= PAGE_SHIFT - 9;
 			nr_blocks <<= PAGE_SHIFT - 9;
 			if (blkdev_issue_discard(si->bdev, start_block,
-				    nr_blocks, GFP_NOIO, DISCARD_FL_BARRIER))
+				    nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT |
+							BLKDEV_IFL_BARRIER))
 				break;
 		}
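The swapfile hunks only swap DISCARD_FL_BARRIER for BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER; the surrounding page-to-sector arithmetic is unchanged. For reference, a small standalone example of that << (PAGE_SHIFT - 9) conversion, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12			/* 4 KiB pages on most configs */
#define SECTOR_SHIFT 9			/* 512-byte sectors */

int main(void)
{
	unsigned long long nr_pages = 16;
	/* Same conversion the swap extent code uses before calling
	 * blkdev_issue_discard(): pages -> 512-byte sectors. */
	unsigned long long nr_sectors = nr_pages << (PAGE_SHIFT - SECTOR_SHIFT);

	printf("%llu pages = %llu sectors\n", nr_pages, nr_sectors);
	return 0;
}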
 
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index dd321e3..486b8f3 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -659,7 +659,7 @@
  *
  * Returns the number of bytes read.
  */
-static ssize_t brforward_read(struct kobject *kobj,
+static ssize_t brforward_read(struct file *filp, struct kobject *kobj,
 			      struct bin_attribute *bin_attr,
 			      char *buf, loff_t off, size_t count)
 {
diff --git a/net/core/dev.c b/net/core/dev.c
index 6c82065..d273e4e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1002,15 +1002,10 @@
 		return err;
 
 rollback:
-	/* For now only devices in the initial network namespace
-	 * are in sysfs.
-	 */
-	if (net_eq(net, &init_net)) {
-		ret = device_rename(&dev->dev, dev->name);
-		if (ret) {
-			memcpy(dev->name, oldname, IFNAMSIZ);
-			return ret;
-		}
+	ret = device_rename(&dev->dev, dev->name);
+	if (ret) {
+		memcpy(dev->name, oldname, IFNAMSIZ);
+		return ret;
 	}
 
 	write_lock_bh(&dev_base_lock);
@@ -4994,8 +4989,6 @@
 	if (dev->features & NETIF_F_SG)
 		dev->features |= NETIF_F_GSO;
 
-	netdev_initialize_kobject(dev);
-
 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
 	ret = notifier_to_errno(ret);
 	if (ret)
@@ -5547,15 +5540,6 @@
 	if (dev->features & NETIF_F_NETNS_LOCAL)
 		goto out;
 
-#ifdef CONFIG_SYSFS
-	/* Don't allow real devices to be moved when sysfs
-	 * is enabled.
-	 */
-	err = -EINVAL;
-	if (dev->dev.parent)
-		goto out;
-#endif
-
 	/* Ensure the device has been registered */
 	err = -EINVAL;
 	if (dev->reg_state != NETREG_REGISTERED)
@@ -5606,8 +5590,6 @@
 	dev_uc_flush(dev);
 	dev_mc_flush(dev);
 
-	netdev_unregister_kobject(dev);
-
 	/* Actually switch the network namespace */
 	dev_net_set(dev, net);
 
@@ -5620,7 +5602,7 @@
 	}
 
 	/* Fixup kobjects */
-	err = netdev_register_kobject(dev);
+	err = device_rename(&dev->dev, dev->name);
 	WARN_ON(err);
 
 	/* Add the device back in the hashes */
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c57c4b2..99e7052 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -14,7 +14,9 @@
 #include <linux/netdevice.h>
 #include <linux/if_arp.h>
 #include <linux/slab.h>
+#include <linux/nsproxy.h>
 #include <net/sock.h>
+#include <net/net_namespace.h>
 #include <linux/rtnetlink.h>
 #include <linux/wireless.h>
 #include <linux/vmalloc.h>
@@ -467,6 +469,7 @@
 	.attrs = wireless_attrs,
 };
 #endif
+#endif /* CONFIG_SYSFS */
 
 #ifdef CONFIG_RPS
 /*
@@ -766,7 +769,38 @@
 	kset_unregister(net->queues_kset);
 }
 #endif /* CONFIG_RPS */
-#endif /* CONFIG_SYSFS */
+
+static const void *net_current_ns(void)
+{
+	return current->nsproxy->net_ns;
+}
+
+static const void *net_initial_ns(void)
+{
+	return &init_net;
+}
+
+static const void *net_netlink_ns(struct sock *sk)
+{
+	return sock_net(sk);
+}
+
+static struct kobj_ns_type_operations net_ns_type_operations = {
+	.type = KOBJ_NS_TYPE_NET,
+	.current_ns = net_current_ns,
+	.netlink_ns = net_netlink_ns,
+	.initial_ns = net_initial_ns,
+};
+
+static void net_kobj_ns_exit(struct net *net)
+{
+	kobj_ns_exit(KOBJ_NS_TYPE_NET, net);
+}
+
+static struct pernet_operations kobj_net_ops = {
+	.exit = net_kobj_ns_exit,
+};
+
 
 #ifdef CONFIG_HOTPLUG
 static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
@@ -774,9 +808,6 @@
 	struct net_device *dev = to_net_dev(d);
 	int retval;
 
-	if (!net_eq(dev_net(dev), &init_net))
-		return 0;
-
 	/* pass interface to uevent. */
 	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
 	if (retval)
@@ -806,6 +837,13 @@
 	kfree((char *)dev - dev->padded);
 }
 
+static const void *net_namespace(struct device *d)
+{
+	struct net_device *dev;
+	dev = container_of(d, struct net_device, dev);
+	return dev_net(dev);
+}
+
 static struct class net_class = {
 	.name = "net",
 	.dev_release = netdev_release,
@@ -815,6 +853,8 @@
 #ifdef CONFIG_HOTPLUG
 	.dev_uevent = netdev_uevent,
 #endif
+	.ns_type = &net_ns_type_operations,
+	.namespace = net_namespace,
 };
 
 /* Delete sysfs entries but hold kobject reference until after all
@@ -826,9 +866,6 @@
 
 	kobject_get(&dev->kobj);
 
-	if (!net_eq(dev_net(net), &init_net))
-		return;
-
 #ifdef CONFIG_RPS
 	rx_queue_remove_kobjects(net);
 #endif
@@ -843,6 +880,7 @@
 	const struct attribute_group **groups = net->sysfs_groups;
 	int error = 0;
 
+	device_initialize(dev);
 	dev->class = &net_class;
 	dev->platform_data = net;
 	dev->groups = groups;
@@ -865,9 +903,6 @@
 #endif
 #endif /* CONFIG_SYSFS */
 
-	if (!net_eq(dev_net(net), &init_net))
-		return 0;
-
 	error = device_add(dev);
 	if (error)
 		return error;
@@ -896,13 +931,9 @@
 EXPORT_SYMBOL(netdev_class_create_file);
 EXPORT_SYMBOL(netdev_class_remove_file);
 
-void netdev_initialize_kobject(struct net_device *net)
-{
-	struct device *device = &(net->dev);
-	device_initialize(device);
-}
-
 int netdev_kobject_init(void)
 {
+	kobj_ns_type_register(&net_ns_type_operations);
+	register_pernet_subsys(&kobj_net_ops);
 	return class_register(&net_class);
 }
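Registering net_ns_type_operations and wiring the class .namespace callback is what lets every net device keep a sysfs entry regardless of namespace, with visibility decided by comparing the device's namespace tag against the viewer's. A userspace sketch of that tag comparison follows; netdev, visible_in and the tag variables are invented names, and the real matching is done inside sysfs through the callbacks above.

#include <stdio.h>

/* Tags standing in for struct net pointers. */
static int init_net_tag, other_net_tag;

struct netdev {
	const char *name;
	const void *ns;
};

/* The effective rule: a device shows up in a sysfs view only if the
 * view's namespace tag matches the device's. */
static int visible_in(const struct netdev *dev, const void *viewer_ns)
{
	return dev->ns == viewer_ns;
}

int main(void)
{
	struct netdev eth0 = { "eth0", &init_net_tag };
	struct netdev veth0 = { "veth0", &other_net_tag };

	printf("init_net sees eth0:  %d\n", visible_in(&eth0, &init_net_tag));
	printf("init_net sees veth0: %d\n", visible_in(&veth0, &init_net_tag));
	return 0;
}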
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
index 14e7524..805555e 100644
--- a/net/core/net-sysfs.h
+++ b/net/core/net-sysfs.h
@@ -4,5 +4,4 @@
 int netdev_kobject_init(void);
 int netdev_register_kobject(struct net_device *);
 void netdev_unregister_kobject(struct net_device *);
-void netdev_initialize_kobject(struct net_device *);
 #endif
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c543dd2..66d9c41 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1406,12 +1406,13 @@
 /*
  * Fill page/offset/length into spd, if it can hold more pages.
  */
-static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
+static inline int spd_fill_page(struct splice_pipe_desc *spd,
+				struct pipe_inode_info *pipe, struct page *page,
 				unsigned int *len, unsigned int offset,
 				struct sk_buff *skb, int linear,
 				struct sock *sk)
 {
-	if (unlikely(spd->nr_pages == PIPE_BUFFERS))
+	if (unlikely(spd->nr_pages == pipe->buffers))
 		return 1;
 
 	if (linear) {
@@ -1447,7 +1448,8 @@
 				   unsigned int plen, unsigned int *off,
 				   unsigned int *len, struct sk_buff *skb,
 				   struct splice_pipe_desc *spd, int linear,
-				   struct sock *sk)
+				   struct sock *sk,
+				   struct pipe_inode_info *pipe)
 {
 	if (!*len)
 		return 1;
@@ -1470,7 +1472,7 @@
 		/* the linear region may spread across several pages  */
 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
-		if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk))
+		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
 			return 1;
 
 		__segment_seek(&page, &poff, &plen, flen);
@@ -1485,9 +1487,9 @@
  * Map linear and fragment data from the skb to spd. It reports failure if the
  * pipe is full or if we already spliced the requested length.
  */
-static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
-			     unsigned int *len, struct splice_pipe_desc *spd,
-			     struct sock *sk)
+static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
+			     unsigned int *offset, unsigned int *len,
+			     struct splice_pipe_desc *spd, struct sock *sk)
 {
 	int seg;
 
@@ -1497,7 +1499,7 @@
 	if (__splice_segment(virt_to_page(skb->data),
 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
 			     skb_headlen(skb),
-			     offset, len, skb, spd, 1, sk))
+			     offset, len, skb, spd, 1, sk, pipe))
 		return 1;
 
 	/*
@@ -1507,7 +1509,7 @@
 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
 		if (__splice_segment(f->page, f->page_offset, f->size,
-				     offset, len, skb, spd, 0, sk))
+				     offset, len, skb, spd, 0, sk, pipe))
 			return 1;
 	}
 
@@ -1524,8 +1526,8 @@
 		    struct pipe_inode_info *pipe, unsigned int tlen,
 		    unsigned int flags)
 {
-	struct partial_page partial[PIPE_BUFFERS];
-	struct page *pages[PIPE_BUFFERS];
+	struct partial_page partial[PIPE_DEF_BUFFERS];
+	struct page *pages[PIPE_DEF_BUFFERS];
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.partial = partial,
@@ -1535,12 +1537,16 @@
 	};
 	struct sk_buff *frag_iter;
 	struct sock *sk = skb->sk;
+	int ret = 0;
+
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
 
 	/*
 	 * __skb_splice_bits() only fails if the output has no room left,
 	 * so no point in going over the frag_list for the error case.
 	 */
-	if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
+	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
 		goto done;
 	else if (!tlen)
 		goto done;
@@ -1551,14 +1557,12 @@
 	skb_walk_frags(skb, frag_iter) {
 		if (!tlen)
 			break;
-		if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk))
+		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
 			break;
 	}
 
 done:
 	if (spd.nr_pages) {
-		int ret;
-
 		/*
 		 * Drop the socket lock, otherwise we have reverse
 		 * locking dependencies between sk_lock and i_mutex
@@ -1571,10 +1575,10 @@
 		release_sock(sk);
 		ret = splice_to_pipe(pipe, &spd);
 		lock_sock(sk);
-		return ret;
 	}
 
-	return 0;
+	splice_shrink_spd(pipe, &spd);
+	return ret;
 }
 
 /**
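The splice changes size the page descriptors to the pipe's actual buffer count instead of the old compile-time PIPE_BUFFERS, growing to a heap allocation via splice_grow_spd() when the pipe is larger than the stack-sized default and releasing it again with splice_shrink_spd(). A userspace sketch of that stack-default/heap-when-larger pattern, with invented names (page_desc, desc_grow, desc_shrink):

#include <stdlib.h>

#define DEF_BUFFERS 16			/* stand-in for PIPE_DEF_BUFFERS */

struct page_desc {
	void *page;
	size_t len;
};

struct splice_desc {
	struct page_desc *pages;	/* points at stack or heap storage */
	unsigned int nr_slots;
};

/* Grow the descriptor array only when the pipe holds more than the
 * default number of buffers, mirroring splice_grow_spd(). */
static int desc_grow(struct splice_desc *spd, unsigned int pipe_buffers)
{
	if (pipe_buffers <= DEF_BUFFERS)
		return 0;		/* keep using the caller's stack array */
	spd->pages = calloc(pipe_buffers, sizeof(*spd->pages));
	if (!spd->pages)
		return -1;
	spd->nr_slots = pipe_buffers;
	return 0;
}

/* Release heap storage if desc_grow() allocated any, like splice_shrink_spd(). */
static void desc_shrink(struct splice_desc *spd, struct page_desc *stack_pages)
{
	if (spd->pages != stack_pages)
		free(spd->pages);
}

int main(void)
{
	struct page_desc stack_pages[DEF_BUFFERS];
	struct splice_desc spd = { stack_pages, DEF_BUFFERS };

	if (desc_grow(&spd, 64))	/* e.g. a pipe resized beyond the default */
		return 1;
	/* ... fill spd.pages up to spd.nr_slots ... */
	desc_shrink(&spd, stack_pages);
	return 0;
}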
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 6464a19..a2eb965 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -978,6 +978,8 @@
 	int delivered;
 	gfp_t allocation;
 	struct sk_buff *skb, *skb2;
+	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
+	void *tx_data;
 };
 
 static inline int do_one_broadcast(struct sock *sk,
@@ -1020,6 +1022,9 @@
 		p->failure = 1;
 		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
 			p->delivery_failure = 1;
+	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
+		kfree_skb(p->skb2);
+		p->skb2 = NULL;
 	} else if (sk_filter(sk, p->skb2)) {
 		kfree_skb(p->skb2);
 		p->skb2 = NULL;
@@ -1038,8 +1043,10 @@
 	return 0;
 }
 
-int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
-		      u32 group, gfp_t allocation)
+int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
+	u32 group, gfp_t allocation,
+	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
+	void *filter_data)
 {
 	struct net *net = sock_net(ssk);
 	struct netlink_broadcast_data info;
@@ -1059,6 +1066,8 @@
 	info.allocation = allocation;
 	info.skb = skb;
 	info.skb2 = NULL;
+	info.tx_filter = filter;
+	info.tx_data = filter_data;
 
 	/* While we sleep in clone, do not allow to change socket list */
 
@@ -1083,6 +1092,14 @@
 	}
 	return -ESRCH;
 }
+EXPORT_SYMBOL(netlink_broadcast_filtered);
+
+int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
+		      u32 group, gfp_t allocation)
+{
+	return netlink_broadcast_filtered(ssk, skb, pid, group, allocation,
+		NULL, NULL);
+}
 EXPORT_SYMBOL(netlink_broadcast);
 
 struct netlink_set_err_data {
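netlink_broadcast() survives as a thin wrapper that passes NULL for the filter, so existing callers compile unchanged while uevent delivery gets the filtered variant. A minimal sketch of that keep-the-old-API-as-a-wrapper shape, using invented names (broadcast_filtered, msg_filter_t) rather than the netlink functions:

#include <stddef.h>
#include <stdio.h>

typedef int (*msg_filter_t)(int dest_id, void *data);

/* New, more general primitive: optional per-destination filter. */
static int broadcast_filtered(const char *msg, msg_filter_t filter, void *data)
{
	int delivered = 0;

	for (int dest = 0; dest < 4; dest++) {
		if (filter && filter(dest, data))
			continue;
		printf("to %d: %s\n", dest, msg);
		delivered++;
	}
	return delivered ? 0 : -1;
}

/* The old entry point stays as a wrapper, so existing callers need no
 * edits -- the same shape as netlink_broadcast() above. */
static int broadcast(const char *msg)
{
	return broadcast_filtered(msg, NULL, NULL);
}

static int odd_only(int dest_id, void *data)
{
	(void)data;
	return dest_id % 2 == 0;	/* skip even-numbered destinations */
}

int main(void)
{
	broadcast("hello everyone");
	broadcast_filtered("hello odd ones", odd_only, NULL);
	return 0;
}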
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index a4d7434..f2bbea9 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2656,6 +2656,7 @@
 # check for semaphores used as mutexes
 		if ($line =~ /^.\s*init_MUTEX_LOCKED\s*\(/) {
 			WARN("consider using a completion\n" . $herecurr);
+
 		}
 # recommend strict_strto* over simple_strto*
 		if ($line =~ /\bsimple_(strto.*?)\s*\(/) {
@@ -2740,6 +2741,16 @@
 				WARN("use of in_atomic() is incorrect outside core kernel code\n" . $herecurr);
 			}
 		}
+
+# check for lockdep_set_novalidate_class
+		if ($line =~ /^.\s*lockdep_set_novalidate_class\s*\(/ ||
+		    $line =~ /__lockdep_no_validate__\s*\)/ ) {
+			if ($realfile !~ m@^kernel/lockdep@ &&
+			    $realfile !~ m@^include/linux/lockdep@ &&
+			    $realfile !~ m@^drivers/base/core@) {
+				ERROR("lockdep_no_validate class is reserved for device->mutex.\n" . $herecurr);
+			}
+		}
 	}
 
 	# If we have no input at all, then there is nothing to report on
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index 2dc2d65..7625b85 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -94,7 +94,7 @@
 		       iint->opencount);
 		iint->opencount = 0;
 	}
-	kref_set(&iint->refcount, 1);
+	kref_init(&iint->refcount);
 	kmem_cache_free(iint_cache, iint);
 }
 
@@ -133,7 +133,7 @@
 	iint->readcount = 0;
 	iint->writecount = 0;
 	iint->opencount = 0;
-	kref_set(&iint->refcount, 1);
+	kref_init(&iint->refcount);
 }
 
 static int __init ima_iintcache_init(void)
diff --git a/sound/aoa/core/gpio-pmf.c b/sound/aoa/core/gpio-pmf.c
index 6776d1c..7e267c9 100644
--- a/sound/aoa/core/gpio-pmf.c
+++ b/sound/aoa/core/gpio-pmf.c
@@ -116,12 +116,9 @@
 	mutex_destroy(&rt->line_in_notify.mutex);
 	mutex_destroy(&rt->line_out_notify.mutex);
 
-	if (rt->headphone_notify.gpio_private)
-		kfree(rt->headphone_notify.gpio_private);
-	if (rt->line_in_notify.gpio_private)
-		kfree(rt->line_in_notify.gpio_private);
-	if (rt->line_out_notify.gpio_private)
-		kfree(rt->line_out_notify.gpio_private);
+	kfree(rt->headphone_notify.gpio_private);
+	kfree(rt->line_in_notify.gpio_private);
+	kfree(rt->line_out_notify.gpio_private);
 }
 
 static void pmf_handle_notify_irq(void *data)